Dataset schema (column: dtype, min to max):
body_hash: stringlengths, 64 to 64
body: stringlengths, 23 to 109k
docstring: stringlengths, 1 to 57k
path: stringlengths, 4 to 198
name: stringlengths, 1 to 115
repository_name: stringlengths, 7 to 111
repository_stars: float64, 0 to 191k
lang: stringclasses, 1 value
body_without_docstring: stringlengths, 14 to 108k
unified: stringlengths, 45 to 133k
2b3d666a531dec98fbcb07dc31247dfc0ac5a8f35ab6df078510177b79b013cb
def extract_eventualities_from_parsed_result(self, parsed_result, output_format='Eventuality', in_order=True, **kw): ' This method extracts eventualities from parsed_result of one sentence.\n ' if (output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities_from_parsed_result only supports Eventuality or json.') return self.eventuality_extractor.extract_from_parsed_result(parsed_result, output_format=output_format, in_order=in_order, **kw)
This method extracts eventualities from parsed_result of one sentence.
aser/extract/aser_extractor.py
extract_eventualities_from_parsed_result
HKUST-KnowComp/CSKB-Population
13
python
def extract_eventualities_from_parsed_result(self, parsed_result, output_format='Eventuality', in_order=True, **kw): ' \n ' if (output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities_from_parsed_result only supports Eventuality or json.') return self.eventuality_extractor.extract_from_parsed_result(parsed_result, output_format=output_format, in_order=in_order, **kw)
def extract_eventualities_from_parsed_result(self, parsed_result, output_format='Eventuality', in_order=True, **kw): ' \n ' if (output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities_from_parsed_result only supports Eventuality or json.') return self.eventuality_extractor.extract_from_parsed_result(parsed_result, output_format=output_format, in_order=in_order, **kw)<|docstring|>This method extracts eventualities from parsed_result of one sentence.<|endoftext|>
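The record above documents extract_eventualities_from_parsed_result, which operates on the output of the extractor's own parse_text step. A hedged usage sketch follows; the SeedRuleASERExtractor class name and the CoreNLP constructor arguments are assumptions drawn from the ASER project, not from this row.

```python
# Hypothetical setup; adjust the class name and CoreNLP settings to your installation.
from aser.extract.aser_extractor import SeedRuleASERExtractor  # assumed import

extractor = SeedRuleASERExtractor(corenlp_path='stanford-corenlp-full-2018-02-27',
                                  corenlp_port=9000)  # assumed arguments

# Parse once, then extract eventualities from the parsed result.
parsed_result = extractor.parse_text('I am hungry, so I order a pizza.')
para_eventualities = extractor.extract_eventualities_from_parsed_result(
    parsed_result, output_format='Eventuality', in_order=True)
for sent_eventualities in para_eventualities:
    for eventuality in sent_eventualities:
        print(eventuality)
```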
7728e3e72bb066392e894cba6fe0c2678ab0a471fb4a02f6a4e282b1520d7f55
def extract_eventualities_from_text(self, text, output_format='Eventuality', in_order=True, annotators=None, **kw): ' This method extracts all eventualities for each sentence.\n ' if (output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities_from_text only supports Eventuality or json.') parsed_result = self.parse_text(text, annotators=annotators) return self.extract_eventualities_from_parsed_result(parsed_result, output_format=output_format, in_order=in_order, **kw)
This method extracts all eventualities for each sentence.
aser/extract/aser_extractor.py
extract_eventualities_from_text
HKUST-KnowComp/CSKB-Population
13
python
def extract_eventualities_from_text(self, text, output_format='Eventuality', in_order=True, annotators=None, **kw): ' \n ' if (output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities_from_text only supports Eventuality or json.') parsed_result = self.parse_text(text, annotators=annotators) return self.extract_eventualities_from_parsed_result(parsed_result, output_format=output_format, in_order=in_order, **kw)
def extract_eventualities_from_text(self, text, output_format='Eventuality', in_order=True, annotators=None, **kw): ' \n ' if (output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities_from_text only supports Eventuality or json.') parsed_result = self.parse_text(text, annotators=annotators) return self.extract_eventualities_from_parsed_result(parsed_result, output_format=output_format, in_order=in_order, **kw)<|docstring|>This method extracts all eventualities for each sentence.<|endoftext|>
85e40e853c738a85b8452bff4b002f12fa0c905a6eb93865deeea8af8eeb4dd0
def extract_relations_from_parsed_result(self, parsed_result, para_eventualities, output_format='Relation', in_order=True, **kw): ' This method extracts relations among extracted eventualities.\n ' if (output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations_from_parsed_result only supports Relation or triple.') return self.relation_extractor.extract_from_parsed_result(parsed_result, para_eventualities, output_format=output_format, in_order=in_order, **kw)
This method extracts relations among extracted eventualities.
aser/extract/aser_extractor.py
extract_relations_from_parsed_result
HKUST-KnowComp/CSKB-Population
13
python
def extract_relations_from_parsed_result(self, parsed_result, para_eventualities, output_format='Relation', in_order=True, **kw): ' \n ' if (output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations_from_parsed_result only supports Relation or triple.') return self.relation_extractor.extract_from_parsed_result(parsed_result, para_eventualities, output_format=output_format, in_order=in_order, **kw)
def extract_relations_from_parsed_result(self, parsed_result, para_eventualities, output_format='Relation', in_order=True, **kw): ' \n ' if (output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations_from_parsed_result only supports Relation or triple.') return self.relation_extractor.extract_from_parsed_result(parsed_result, para_eventualities, output_format=output_format, in_order=in_order, **kw)<|docstring|>This method extracts relations among extracted eventualities.<|endoftext|>
e5572d2ae80ab3d9ed1de37382902612520e35865bcc100592de67f1a1232b17
def extract_relations_from_text(self, text, output_format='Relation', in_order=True, annotators=None, **kw): ' This method extracts relations from parsed_result of one paragraph.\n ' if (output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations_from_text only supports Relation or triple.') parsed_result = self.parse_text(text, annotators=annotators) para_eventualities = self.extract_eventualities_from_parsed_result(parsed_result) return self.extract_relations_from_parsed_result(parsed_result, para_eventualities, output_format=output_format, in_order=in_order, **kw)
This method extracts relations from parsed_result of one paragraph.
aser/extract/aser_extractor.py
extract_relations_from_text
HKUST-KnowComp/CSKB-Population
13
python
def extract_relations_from_text(self, text, output_format='Relation', in_order=True, annotators=None, **kw): ' \n ' if (output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations_from_text only supports Relation or triple.') parsed_result = self.parse_text(text, annotators=annotators) para_eventualities = self.extract_eventualities_from_parsed_result(parsed_result) return self.extract_relations_from_parsed_result(parsed_result, para_eventualities, output_format=output_format, in_order=in_order, **kw)
def extract_relations_from_text(self, text, output_format='Relation', in_order=True, annotators=None, **kw): ' \n ' if (output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations_from_text only supports Relation or triple.') parsed_result = self.parse_text(text, annotators=annotators) para_eventualities = self.extract_eventualities_from_parsed_result(parsed_result) return self.extract_relations_from_parsed_result(parsed_result, para_eventualities, output_format=output_format, in_order=in_order, **kw)<|docstring|>This method extracts relations from parsed_result of one paragraph.<|endoftext|>
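extract_relations_from_text re-parses the text and re-extracts eventualities on every call; when a parse and its eventualities are already in hand, extract_relations_from_parsed_result avoids the duplicate work. A sketch continuing from the earlier hypothetical extractor:

```python
# Reuse the parsed_result and para_eventualities from the sketch above
# instead of parsing the paragraph again.
para_relations = extractor.extract_relations_from_parsed_result(
    parsed_result, para_eventualities, output_format='Relation', in_order=True)
for sent_relations in para_relations:
    for relation in sent_relations:
        print(relation)

# One-shot convenience equivalent (parses internally):
# extractor.extract_relations_from_text('I am hungry, so I order a pizza.')
```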
73d87adbd2c89f9d76d4c152f8abbb0d46b5b33bb9666b254604932b816d7fb4
def extract_from_parsed_result(self, parsed_result, eventuality_output_format='Eventuality', relation_output_format='Relation', in_order=True, **kw): ' This method extracts eventualities and relations from parsed_result of one paragraph.\n ' if (eventuality_output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities only supports Eventuality or json.') if (relation_output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations only supports Relation or triple.') if (not isinstance(parsed_result, (list, tuple, dict))): raise NotImplementedError if isinstance(parsed_result, dict): is_single_sent = True parsed_result = [parsed_result] else: is_single_sent = False para_eventualities = self.extract_eventualities_from_parsed_result(parsed_result, output_format='Eventuality', in_order=True, **kw) para_relations = self.extract_relations_from_parsed_result(parsed_result, para_eventualities, output_format='Relation', in_order=True, **kw) if in_order: if (eventuality_output_format == 'json'): para_eventualities = [[eventuality.encode(encoding=None) for eventuality in sent_eventualities] for sent_eventualities in para_eventualities] if (relation_output_format == 'triple'): relations = [list(chain.from_iterable([relation.to_triple() for relation in sent_relations])) for sent_relations in para_relations] if is_single_sent: return (para_eventualities[0], para_relations[0]) else: return (para_eventualities, para_relations) else: eid2eventuality = dict() for eventuality in chain.from_iterable(para_eventualities): eid = eventuality.eid if (eid not in eid2eventuality): eid2eventuality[eid] = deepcopy(eventuality) else: eid2eventuality[eid].update(eventuality) if (eventuality_output_format == 'Eventuality'): eventualities = sorted(eid2eventuality.values(), key=(lambda e: e.eid)) elif (eventuality_output_format == 'json'): eventualities = sorted([eventuality.encode(encoding=None) for eventuality in eid2eventuality.values()], key=(lambda e: e['eid'])) rid2relation = dict() for relation in chain.from_iterable(para_relations): if (relation.rid not in rid2relation): rid2relation[relation.rid] = deepcopy(relation) else: rid2relation[relation.rid].update(relation) if (relation_output_format == 'Relation'): relations = sorted(rid2relation.values(), key=(lambda r: r.rid)) elif (relation_output_format == 'triple'): relations = sorted(chain.from_iterable([relation.to_triples() for relation in rid2relation.values()])) return (eventualities, relations)
This method extracts eventualities and relations from parsed_result of one paragraph.
aser/extract/aser_extractor.py
extract_from_parsed_result
HKUST-KnowComp/CSKB-Population
13
python
def extract_from_parsed_result(self, parsed_result, eventuality_output_format='Eventuality', relation_output_format='Relation', in_order=True, **kw): ' \n ' if (eventuality_output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities only supports Eventuality or json.') if (relation_output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations only supports Relation or triple.') if (not isinstance(parsed_result, (list, tuple, dict))): raise NotImplementedError if isinstance(parsed_result, dict): is_single_sent = True parsed_result = [parsed_result] else: is_single_sent = False para_eventualities = self.extract_eventualities_from_parsed_result(parsed_result, output_format='Eventuality', in_order=True, **kw) para_relations = self.extract_relations_from_parsed_result(parsed_result, para_eventualities, output_format='Relation', in_order=True, **kw) if in_order: if (eventuality_output_format == 'json'): para_eventualities = [[eventuality.encode(encoding=None) for eventuality in sent_eventualities] for sent_eventualities in para_eventualities] if (relation_output_format == 'triple'): relations = [list(chain.from_iterable([relation.to_triple() for relation in sent_relations])) for sent_relations in para_relations] if is_single_sent: return (para_eventualities[0], para_relations[0]) else: return (para_eventualities, para_relations) else: eid2eventuality = dict() for eventuality in chain.from_iterable(para_eventualities): eid = eventuality.eid if (eid not in eid2eventuality): eid2eventuality[eid] = deepcopy(eventuality) else: eid2eventuality[eid].update(eventuality) if (eventuality_output_format == 'Eventuality'): eventualities = sorted(eid2eventuality.values(), key=(lambda e: e.eid)) elif (eventuality_output_format == 'json'): eventualities = sorted([eventuality.encode(encoding=None) for eventuality in eid2eventuality.values()], key=(lambda e: e['eid'])) rid2relation = dict() for relation in chain.from_iterable(para_relations): if (relation.rid not in rid2relation): rid2relation[relation.rid] = deepcopy(relation) else: rid2relation[relation.rid].update(relation) if (relation_output_format == 'Relation'): relations = sorted(rid2relation.values(), key=(lambda r: r.rid)) elif (relation_output_format == 'triple'): relations = sorted(chain.from_iterable([relation.to_triples() for relation in rid2relation.values()])) return (eventualities, relations)
def extract_from_parsed_result(self, parsed_result, eventuality_output_format='Eventuality', relation_output_format='Relation', in_order=True, **kw): ' \n ' if (eventuality_output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities only supports Eventuality or json.') if (relation_output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations only supports Relation or triple.') if (not isinstance(parsed_result, (list, tuple, dict))): raise NotImplementedError if isinstance(parsed_result, dict): is_single_sent = True parsed_result = [parsed_result] else: is_single_sent = False para_eventualities = self.extract_eventualities_from_parsed_result(parsed_result, output_format='Eventuality', in_order=True, **kw) para_relations = self.extract_relations_from_parsed_result(parsed_result, para_eventualities, output_format='Relation', in_order=True, **kw) if in_order: if (eventuality_output_format == 'json'): para_eventualities = [[eventuality.encode(encoding=None) for eventuality in sent_eventualities] for sent_eventualities in para_eventualities] if (relation_output_format == 'triple'): relations = [list(chain.from_iterable([relation.to_triple() for relation in sent_relations])) for sent_relations in para_relations] if is_single_sent: return (para_eventualities[0], para_relations[0]) else: return (para_eventualities, para_relations) else: eid2eventuality = dict() for eventuality in chain.from_iterable(para_eventualities): eid = eventuality.eid if (eid not in eid2eventuality): eid2eventuality[eid] = deepcopy(eventuality) else: eid2eventuality[eid].update(eventuality) if (eventuality_output_format == 'Eventuality'): eventualities = sorted(eid2eventuality.values(), key=(lambda e: e.eid)) elif (eventuality_output_format == 'json'): eventualities = sorted([eventuality.encode(encoding=None) for eventuality in eid2eventuality.values()], key=(lambda e: e['eid'])) rid2relation = dict() for relation in chain.from_iterable(para_relations): if (relation.rid not in rid2relation): rid2relation[relation.rid] = deepcopy(relation) else: rid2relation[relation.rid].update(relation) if (relation_output_format == 'Relation'): relations = sorted(rid2relation.values(), key=(lambda r: r.rid)) elif (relation_output_format == 'triple'): relations = sorted(chain.from_iterable([relation.to_triples() for relation in rid2relation.values()])) return (eventualities, relations)<|docstring|>This method extracts eventualities and relations from parsed_result of one paragraph.<|endoftext|>
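When in_order is False, extract_from_parsed_result above merges duplicate eventualities and relations by their ids (eid2eventuality, rid2relation) before sorting. A self-contained sketch of that merge-by-id pattern, with the Eventuality/Relation classes replaced by a plain stand-in so the example runs on its own:

```python
from copy import deepcopy
from itertools import chain

# Stand-in object: an id plus a frequency that update() accumulates,
# mimicking Eventuality.update / Relation.update in the code above.
class Item:
    def __init__(self, key, freq=1):
        self.key, self.freq = key, freq
    def update(self, other):
        self.freq += other.freq

# Per-sentence groups, as produced with in_order=True.
para_items = [[Item('e1'), Item('e2')], [Item('e1'), Item('e3')]]

# Same pattern as eid2eventuality / rid2relation in extract_from_parsed_result.
key2item = {}
for item in chain.from_iterable(para_items):
    if item.key not in key2item:
        key2item[item.key] = deepcopy(item)
    else:
        key2item[item.key].update(item)

merged = sorted(key2item.values(), key=lambda x: x.key)
print([(i.key, i.freq) for i in merged])  # [('e1', 2), ('e2', 1), ('e3', 1)]
```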
db47b675e98fa64d22db363e4b7a91653dcbfc10f50a4df0e8bd2d8c1b554dc4
def extract_from_text(self, text, eventuality_output_format='Eventuality', relation_output_format='Relation', in_order=True, annotators=None, **kw): ' This method extracts eventualities and relations for each sentence.\n ' if (eventuality_output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities only supports Eventuality or json.') if (relation_output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations only supports Relation or triple.') parsed_result = self.parse_text(text, annotators=annotators) return self.extract_from_parsed_result(parsed_result, eventuality_output_format=eventuality_output_format, relation_output_format=relation_output_format, in_order=in_order, **kw)
This method extracts eventualities and relations for each sentence.
aser/extract/aser_extractor.py
extract_from_text
HKUST-KnowComp/CSKB-Population
13
python
def extract_from_text(self, text, eventuality_output_format='Eventuality', relation_output_format='Relation', in_order=True, annotators=None, **kw): ' \n ' if (eventuality_output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities only supports Eventuality or json.') if (relation_output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations only supports Relation or triple.') parsed_result = self.parse_text(text, annotators=annotators) return self.extract_from_parsed_result(parsed_result, eventuality_output_format=eventuality_output_format, relation_output_format=relation_output_format, in_order=in_order, **kw)
def extract_from_text(self, text, eventuality_output_format='Eventuality', relation_output_format='Relation', in_order=True, annotators=None, **kw): ' \n ' if (eventuality_output_format not in ['Eventuality', 'json']): raise NotImplementedError('Error: extract_eventualities only supports Eventuality or json.') if (relation_output_format not in ['Relation', 'triple']): raise NotImplementedError('Error: extract_relations only supports Relation or triple.') parsed_result = self.parse_text(text, annotators=annotators) return self.extract_from_parsed_result(parsed_result, eventuality_output_format=eventuality_output_format, relation_output_format=relation_output_format, in_order=in_order, **kw)<|docstring|>This method extracts eventualities and relations for each sentence.<|endoftext|>
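extract_from_text is the top-level convenience: it parses the text once and returns an (eventualities, relations) pair in the requested formats. A sketch with the same assumed extractor object as above:

```python
# Hypothetical end-to-end extraction over a short paragraph.
eventualities, relations = extractor.extract_from_text(
    'I am hungry, so I order a pizza. I eat it quickly.',
    eventuality_output_format='json',   # or 'Eventuality'
    relation_output_format='triple',    # or 'Relation'
    in_order=False,                     # merge duplicates across sentences
)
print(len(eventualities), 'eventualities,', len(relations), 'relations')
```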
3382577cc980fd34ef2f75b8ec268606457c5a570d56da225469079441729452
def run(params): ' Route to API method ' if (2 <= len(params) <= 4): if (params[1] == 'widevine_install'): widevine_install() elif (params[1] == 'widevine_remove'): widevine_remove() elif (params[1] == 'check_inputstream'): if (len(params) == 3): check_inputstream(params[2]) elif (len(params) == 4): check_inputstream(params[2], drm=params[3]) elif (len(params) > 4): log('invalid API call, too many parameters') else: ADDON.openSettings()
Route to API method
addon.py
run
tito/script.module.inputstreamhelper
0
python
def run(params): ' ' if (2 <= len(params) <= 4): if (params[1] == 'widevine_install'): widevine_install() elif (params[1] == 'widevine_remove'): widevine_remove() elif (params[1] == 'check_inputstream'): if (len(params) == 3): check_inputstream(params[2]) elif (len(params) == 4): check_inputstream(params[2], drm=params[3]) elif (len(params) > 4): log('invalid API call, too many parameters') else: ADDON.openSettings()
def run(params): ' ' if (2 <= len(params) <= 4): if (params[1] == 'widevine_install'): widevine_install() elif (params[1] == 'widevine_remove'): widevine_remove() elif (params[1] == 'check_inputstream'): if (len(params) == 3): check_inputstream(params[2]) elif (len(params) == 4): check_inputstream(params[2], drm=params[3]) elif (len(params) > 4): log('invalid API call, too many parameters') else: ADDON.openSettings()<|docstring|>Route to API method<|endoftext|>
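run() expects the raw argument list that Kodi hands to the script: params[0] is the script path and params[1..3] select the API method and its arguments. A hedged sketch of the usual entry point; the exact entry-point line is not shown in this row, and the example argument lists are illustrative:

```python
import sys

if __name__ == '__main__':
    # Argument shapes that run() accepts, e.g. as passed by Kodi's RunScript:
    #   ['addon.py', 'widevine_install']                      -> widevine_install()
    #   ['addon.py', 'check_inputstream', 'mpd']              -> check_inputstream('mpd')
    #   ['addon.py', 'check_inputstream', 'mpd', 'widevine']  -> check_inputstream('mpd', drm='widevine')
    #   ['addon.py']                                          -> ADDON.openSettings()
    run(sys.argv)
```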
8e42f417501dab9e7269c6462d8cf676f761e85b6e231bbc2a929a9b50955f17
def check_inputstream(protocol, drm=None): ' The API interface to check inputstream ' Helper(protocol, drm=drm).check_inputstream()
The API interface to check inputstream
addon.py
check_inputstream
tito/script.module.inputstreamhelper
0
python
def check_inputstream(protocol, drm=None): ' ' Helper(protocol, drm=drm).check_inputstream()
def check_inputstream(protocol, drm=None): ' ' Helper(protocol, drm=drm).check_inputstream()<|docstring|>The API interface to check inputstream<|endoftext|>
e7b9fc84b5c061270f29d4257f9e75fd213760def40bd9690bef0186a9fe3765
def widevine_install(): ' The API interface to install Widevine CDM ' Helper('mpd', drm='widevine').install_widevine()
The API interface to install Widevine CDM
addon.py
widevine_install
tito/script.module.inputstreamhelper
0
python
def widevine_install(): ' ' Helper('mpd', drm='widevine').install_widevine()
def widevine_install(): ' ' Helper('mpd', drm='widevine').install_widevine()<|docstring|>The API interface to install Widevine CDM<|endoftext|>
816018d493dc02e5c4a26fafef15933166617a1f7161e5a10afff6faa2cc79be
def widevine_remove(): ' The API interface to remove Widevine CDMs ' Helper('mpd', drm='widevine').remove_widevine()
The API interface to remove Widevine CDMs
addon.py
widevine_remove
tito/script.module.inputstreamhelper
0
python
def widevine_remove(): ' ' Helper('mpd', drm='widevine').remove_widevine()
def widevine_remove(): ' ' Helper('mpd', drm='widevine').remove_widevine()<|docstring|>The API interface to remove Widevine CDMs<|endoftext|>
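The three wrappers above (check_inputstream, widevine_install, widevine_remove) are thin shims over inputstreamhelper.Helper. A video add-on would normally use Helper directly before starting playback; a minimal sketch, with the protocol and DRM strings mirroring the wrappers above:

```python
# Hypothetical playback gate inside a Kodi video plugin.
from inputstreamhelper import Helper

is_helper = Helper('mpd', drm='widevine')
if is_helper.check_inputstream():
    # InputStream Adaptive and Widevine are available; configure the
    # ListItem for MPEG-DASH playback here.
    pass
else:
    # Installation was declined or the platform is unsupported.
    pass
```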
b1c99898ce5ad166e7f29465e7398139d2db67559ddc4ccb70a91d65cc2199dc
def prepare_optparser(): 'Prepare optparser object. New options will be added in this\n function first.\n ' usage = 'usage: %prog -c mysample.cfg -s A01A -1 A01_1.fq -2 A02_2.fq' description = 'Please set the sample name. e.g. L04A, L04C, L04T.' optparser = OptionParser(version='0.0.1', description=description, usage=usage, add_help_option=False) optparser.add_option('-h', '--help', action='help', help='Show this help message and exit.') optparser.add_option('-c', '--config', dest='config', default='config.cfg', type='string', help='Set the config File.[config.cfg]') optparser.add_option('-s', '--samplename', dest='samplename', type='string', help='Set the samplename.(Required)') optparser.add_option('-f', '--case_vcf', dest='case_vcf', type='string', help='You can set this to your case vcf file path.') optparser.add_option('-t', '--control_vcf', dest='control_vcf', type='string', default='', help='You can set this to your case vcf file path.') optparser.add_option('-i', '--in_bam', dest='in_bam', type='string', help='Set your bam file path.(eg. case,control, or case only)') optparser.add_option('-o', '--out_dir', dest='out_dir', type='string', default='vcf', help='Set the vcf file out_dir.[vcf]') return optparser
Prepare optparser object. New options will be added in this function first.
iseq/refinement.py
prepare_optparser
Miachol/iseq
12
python
def prepare_optparser(): 'Prepare optparser object. New options will be added in this\n function first.\n ' usage = 'usage: %prog -c mysample.cfg -s A01A -1 A01_1.fq -2 A02_2.fq' description = 'Please set the sample name. e.g. L04A, L04C, L04T.' optparser = OptionParser(version='0.0.1', description=description, usage=usage, add_help_option=False) optparser.add_option('-h', '--help', action='help', help='Show this help message and exit.') optparser.add_option('-c', '--config', dest='config', default='config.cfg', type='string', help='Set the config File.[config.cfg]') optparser.add_option('-s', '--samplename', dest='samplename', type='string', help='Set the samplename.(Required)') optparser.add_option('-f', '--case_vcf', dest='case_vcf', type='string', help='You can set this to your case vcf file path.') optparser.add_option('-t', '--control_vcf', dest='control_vcf', type='string', default='', help='You can set this to your case vcf file path.') optparser.add_option('-i', '--in_bam', dest='in_bam', type='string', help='Set your bam file path.(eg. case,control, or case only)') optparser.add_option('-o', '--out_dir', dest='out_dir', type='string', default='vcf', help='Set the vcf file out_dir.[vcf]') return optparser
def prepare_optparser(): 'Prepare optparser object. New options will be added in this\n function first.\n ' usage = 'usage: %prog -c mysample.cfg -s A01A -1 A01_1.fq -2 A02_2.fq' description = 'Please set the sample name. e.g. L04A, L04C, L04T.' optparser = OptionParser(version='0.0.1', description=description, usage=usage, add_help_option=False) optparser.add_option('-h', '--help', action='help', help='Show this help message and exit.') optparser.add_option('-c', '--config', dest='config', default='config.cfg', type='string', help='Set the config File.[config.cfg]') optparser.add_option('-s', '--samplename', dest='samplename', type='string', help='Set the samplename.(Required)') optparser.add_option('-f', '--case_vcf', dest='case_vcf', type='string', help='You can set this to your case vcf file path.') optparser.add_option('-t', '--control_vcf', dest='control_vcf', type='string', default='', help='You can set this to your case vcf file path.') optparser.add_option('-i', '--in_bam', dest='in_bam', type='string', help='Set your bam file path.(eg. case,control, or case only)') optparser.add_option('-o', '--out_dir', dest='out_dir', type='string', default='vcf', help='Set the vcf file out_dir.[vcf]') return optparser<|docstring|>Prepare optparser object. New options will be added in this function first.<|endoftext|>
d628172ef68cccfcf7f895b25aebdbf2e4c5910f922c585063b8d012e00d50e5
def opt_validate(optparser): 'Validate options from a OptParser object.\n Ret: Validated options object.\n ' (options, args) = optparser.parse_args() if (not options.config): optparser.print_help() sys.exit(1) elif (not options.samplename): optparser.print_help() sys.exit(1) elif (not options.case_vcf): optparser.print_help() sys.exit(1) return options
Validate options from a OptParser object. Ret: Validated options object.
iseq/refinement.py
opt_validate
Miachol/iseq
12
python
def opt_validate(optparser): 'Validate options from a OptParser object.\n Ret: Validated options object.\n ' (options, args) = optparser.parse_args() if (not options.config): optparser.print_help() sys.exit(1) elif (not options.samplename): optparser.print_help() sys.exit(1) elif (not options.case_vcf): optparser.print_help() sys.exit(1) return options
def opt_validate(optparser): 'Validate options from a OptParser object.\n Ret: Validated options object.\n ' (options, args) = optparser.parse_args() if (not options.config): optparser.print_help() sys.exit(1) elif (not options.samplename): optparser.print_help() sys.exit(1) elif (not options.case_vcf): optparser.print_help() sys.exit(1) return options<|docstring|>Validate options from a OptParser object. Ret: Validated options object.<|endoftext|>
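prepare_optparser and opt_validate are meant to run back-to-back at start-up: the first builds the parser, the second parses sys.argv and exits with the help text if -c, -s, or -f is missing. A minimal driver sketch using the two functions shown above:

```python
# Hypothetical driver; assumes prepare_optparser and opt_validate are importable.
if __name__ == '__main__':
    optparser = prepare_optparser()
    options = opt_validate(optparser)  # prints help and exits(1) on missing options
    print(options.samplename, options.case_vcf, options.out_dir)
```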
faf11e71176fc6847ec6bb6bde45723d14dac6746857d5603a58eea96b9544f0
def lambda_handler(event=None, context=None): '\n AWS Lambda handler.\n\n ' wzdx_registry = WZDxFeedRegistry(DATASET_ID, socrata_params=json.loads(SOCRATA_PARAMS), lambda_to_trigger=LAMBDA_TO_TRIGGER, logger=logger) wzdx_registry.ingest()
AWS Lambda handler.
lambda__wzdx_trigger_ingest.py
lambda_handler
usdot-its-jpo-data-portal/wzdx_registry
0
python
def lambda_handler(event=None, context=None): '\n \n\n ' wzdx_registry = WZDxFeedRegistry(DATASET_ID, socrata_params=json.loads(SOCRATA_PARAMS), lambda_to_trigger=LAMBDA_TO_TRIGGER, logger=logger) wzdx_registry.ingest()
def lambda_handler(event=None, context=None): '\n \n\n ' wzdx_registry = WZDxFeedRegistry(DATASET_ID, socrata_params=json.loads(SOCRATA_PARAMS), lambda_to_trigger=LAMBDA_TO_TRIGGER, logger=logger) wzdx_registry.ingest()<|docstring|>AWS Lambda handler.<|endoftext|>
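lambda_handler accepts the standard event/context pair but uses neither; everything it needs comes from module-level names (DATASET_ID, SOCRATA_PARAMS, LAMBDA_TO_TRIGGER, logger) whose definitions are not shown in this row. A minimal local smoke test under that assumption:

```python
# Hypothetical local invocation; in AWS the Lambda runtime supplies event/context.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    lambda_handler(event=None, context=None)  # both arguments are accepted but unused
```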
5d54e9e06d8118dd04f8e2af49c92f11478ec832865c8c79c8d161b21753eb70
def main(argv): '\n Main entry point of this utility application.\n\n This is simply a function called by the checking of namespace __main__, at\n the end of this script (in order to execute only when this script is ran\n directly).\n\n Parameters\n ------\n argv: list of str\n Arguments received from the command line.\n ' annotationsPath = 'C:/Users/luigi/Dropbox/Doutorado/dataset/annotation-all' print('Reading data...') data = OrderedDict() tot = OrderedDict() for (dirpath, _, filenames) in os.walk(annotationsPath): for f in filenames: name = os.path.splitext(f)[0] parts = name.split('-') if ((len(parts) != 2) or (parts[1] != 'face')): continue subject = parts[0].split('_')[1] fileName = os.path.join(dirpath, f) print('\tfile {}...'.format(fileName)) fails = [] lastFrame = 0 with open(fileName, 'r', newline='') as file: reader = csv.reader(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) next(reader, None) for row in reader: lastFrame = int(row[0]) if (not any([float(i) for i in row[1:]])): fails.append(int(row[0])) tot[subject] = lastFrame data[subject] = fails print('Plotting data...') subjects = [] times = [] fails = [] for (s, v) in data.items(): fail = ((len(v) / tot[s]) * 100) fails.append([int(s), fail]) print('Subject: {} Fails: {:.2f}%'.format(s, fail)) for f in v: subjects.append(int(s)) times.append(((f / 30) / 60)) ax = sns.stripplot(x=subjects, y=times, linewidth=1) ax.set_xlabel('Subjects', fontsize=15) ax.set_ylabel('Video Progress (in Minutes)', fontsize=15) ax.set_ylim([0, 10]) fails = np.array(fails) np.savetxt('fails.csv', fails, fmt=('%d', '%.5f'), delimiter=',', header='subject,fails (percent)') mng = plt.get_current_fig_manager() mng.window.state('zoomed') plt.show()
Main entry point of this utility application. This is simply a function called by the checking of namespace __main__, at the end of this script (in order to execute only when this script is ran directly). Parameters ------ argv: list of str Arguments received from the command line.
fsdk/reports/faces.py
main
luigivieira/fsdk
0
python
def main(argv): '\n Main entry point of this utility application.\n\n This is simply a function called by the checking of namespace __main__, at\n the end of this script (in order to execute only when this script is ran\n directly).\n\n Parameters\n ------\n argv: list of str\n Arguments received from the command line.\n ' annotationsPath = 'C:/Users/luigi/Dropbox/Doutorado/dataset/annotation-all' print('Reading data...') data = OrderedDict() tot = OrderedDict() for (dirpath, _, filenames) in os.walk(annotationsPath): for f in filenames: name = os.path.splitext(f)[0] parts = name.split('-') if ((len(parts) != 2) or (parts[1] != 'face')): continue subject = parts[0].split('_')[1] fileName = os.path.join(dirpath, f) print('\tfile {}...'.format(fileName)) fails = [] lastFrame = 0 with open(fileName, 'r', newline='') as file: reader = csv.reader(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) next(reader, None) for row in reader: lastFrame = int(row[0]) if (not any([float(i) for i in row[1:]])): fails.append(int(row[0])) tot[subject] = lastFrame data[subject] = fails print('Plotting data...') subjects = [] times = [] fails = [] for (s, v) in data.items(): fail = ((len(v) / tot[s]) * 100) fails.append([int(s), fail]) print('Subject: {} Fails: {:.2f}%'.format(s, fail)) for f in v: subjects.append(int(s)) times.append(((f / 30) / 60)) ax = sns.stripplot(x=subjects, y=times, linewidth=1) ax.set_xlabel('Subjects', fontsize=15) ax.set_ylabel('Video Progress (in Minutes)', fontsize=15) ax.set_ylim([0, 10]) fails = np.array(fails) np.savetxt('fails.csv', fails, fmt=('%d', '%.5f'), delimiter=',', header='subject,fails (percent)') mng = plt.get_current_fig_manager() mng.window.state('zoomed') plt.show()
def main(argv): '\n Main entry point of this utility application.\n\n This is simply a function called by the checking of namespace __main__, at\n the end of this script (in order to execute only when this script is ran\n directly).\n\n Parameters\n ------\n argv: list of str\n Arguments received from the command line.\n ' annotationsPath = 'C:/Users/luigi/Dropbox/Doutorado/dataset/annotation-all' print('Reading data...') data = OrderedDict() tot = OrderedDict() for (dirpath, _, filenames) in os.walk(annotationsPath): for f in filenames: name = os.path.splitext(f)[0] parts = name.split('-') if ((len(parts) != 2) or (parts[1] != 'face')): continue subject = parts[0].split('_')[1] fileName = os.path.join(dirpath, f) print('\tfile {}...'.format(fileName)) fails = [] lastFrame = 0 with open(fileName, 'r', newline='') as file: reader = csv.reader(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) next(reader, None) for row in reader: lastFrame = int(row[0]) if (not any([float(i) for i in row[1:]])): fails.append(int(row[0])) tot[subject] = lastFrame data[subject] = fails print('Plotting data...') subjects = [] times = [] fails = [] for (s, v) in data.items(): fail = ((len(v) / tot[s]) * 100) fails.append([int(s), fail]) print('Subject: {} Fails: {:.2f}%'.format(s, fail)) for f in v: subjects.append(int(s)) times.append(((f / 30) / 60)) ax = sns.stripplot(x=subjects, y=times, linewidth=1) ax.set_xlabel('Subjects', fontsize=15) ax.set_ylabel('Video Progress (in Minutes)', fontsize=15) ax.set_ylim([0, 10]) fails = np.array(fails) np.savetxt('fails.csv', fails, fmt=('%d', '%.5f'), delimiter=',', header='subject,fails (percent)') mng = plt.get_current_fig_manager() mng.window.state('zoomed') plt.show()<|docstring|>Main entry point of this utility application. This is simply a function called by the checking of namespace __main__, at the end of this script (in order to execute only when this script is ran directly). Parameters ------ argv: list of str Arguments received from the command line.<|endoftext|>
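main() above reduces to two small calculations per subject: the share of frames in which every annotation value is zero (treated as a face-detection failure), and the conversion of failing frame indices to minutes at 30 fps. A tiny self-contained sketch of both calculations:

```python
# Stand-in data: frame indices where detection failed, and the last frame seen.
fails = [30, 90, 900, 5400]
last_frame = 18000  # about 10 minutes of 30 fps video

fail_percent = len(fails) / last_frame * 100      # share of failing frames
minutes = [frame / 30 / 60 for frame in fails]    # frame -> seconds -> minutes

print(f'Fails: {fail_percent:.2f}%')              # Fails: 0.02%
print([f'{m:.2f} min' for m in minutes])          # ['0.02 min', '0.05 min', '0.50 min', '3.00 min']
```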
c3d2c3911806f9dcc486aab152769c80156ed1fb2466c35e9575dd319f66457c
def __init__(self, universe, selection1='protein', selection2='not resname SOL', water_selection='resname SOL', order=1, selection1_type='both', update_selection=False, update_water_selection=True, filter_first=True, distance_type='hydrogen', distance=3.0, angle=120.0, forcefield='CHARMM27', donors=None, acceptors=None, output_format='sele1_sele2', debug=None, verbose=False, pbc=False, **kwargs): 'Set up the calculation of water bridges between two selections in a\n universe.\n\n The timeseries is accessible as the attribute\n :attr:`WaterBridgeAnalysis.timeseries`.\n\n If no hydrogen bonds are detected or if the initial check fails, look\n at the log output (enable with :func:`MDAnalysis.start_logging` and set\n `verbose` ``=True``). It is likely that the default names for donors\n and acceptors are not suitable (especially for non-standard\n ligands). In this case, either change the `forcefield` or use\n customized `donors` and/or `acceptors`.\n\n Parameters\n ----------\n universe : Universe\n Universe object\n selection1 : str (optional)\n Selection string for first selection [\'protein\']\n selection2 : str (optional)\n Selection string for second selection [\'not resname SOL\']\n This string selects everything except water where water is assumed\n to have a residue name as SOL.\n water_selection : str (optional)\n Selection string for bridging water selection [\'resname SOL\']\n The default selection assumes that the water molecules have residue\n name "SOL". Change it to the appropriate selection for your\n specific force field.\n\n However, in theory, this selection can be anything which forms\n a hydrogen bond with selection 1 and selection 2.\n order : int (optional)\n The maximum number of water bridges linking both selections.\n if the order is set to 3, then all the residues linked with less than\n three water molecules will be detected. [1]\n\n Computation of high order water bridges can be very time-consuming.\n Think carefully before running the calculation, do you really want\n to compute the 20th order water bridge between domain A and domain B\n or you just want to know the third order water bridge between two residues.\n selection1_type : {"donor", "acceptor", "both"} (optional)\n Selection 1 can be \'donor\', \'acceptor\' or \'both\'. Note that the\n value for `selection1_type` automatically determines how\n `selection2` handles donors and acceptors: If `selection1` contains\n \'both\' then `selection2` will also contain \'both\'. If `selection1`\n is set to \'donor\' then `selection2` is \'acceptor\' (and vice versa).\n [\'both\'].\n update_selection : bool (optional)\n Update selection 1 and 2 at each frame. Setting to ``True`` if the\n selection is not static. Selections are filtered first to speed up\n performance. Thus, setting to ``True`` is recommended if contact\n surface between selection 1 and selection 2 is constantly\n changing. [``False``]\n update_water_selection : bool (optional)\n Update selection of water at each frame. Setting to ``False`` is\n **only** recommended when the total amount of water molecules in the\n simulation are small and when water molecules remain static across\n the simulation.\n\n However, in normal simulations, only a tiny proportion of water is\n engaged in the formation of water bridge. It is recommended to\n update the water selection and set keyword `filter_first` to\n ``True`` so as to filter out water not residing between the two\n selections. 
[``True``]\n filter_first : bool (optional)\n Filter the water selection to only include water within 4 Å + `order` *\n (2 Å + `distance`) away from `both` selection 1 and selection 2.\n Selection 1 and selection 2 are both filtered to only include atoms\n with the same distance away from the other selection. [``True``]\n distance : float (optional)\n Distance cutoff for hydrogen bonds; only interactions with a H-A\n distance <= `distance` (and the appropriate D-H-A angle, see\n `angle`) are recorded. (Note: `distance_type` can change this to\n the D-A distance.) [3.0]\n angle : float (optional)\n Angle cutoff for hydrogen bonds; an ideal H-bond has an angle of\n 180º. A hydrogen bond is only recorded if the D-H-A angle is\n >= `angle`. The default of 120º also finds fairly non-specific\n hydrogen interactions and possibly better value is 150º. [120.0]\n forcefield : {"CHARMM27", "GLYCAM06", "other"} (optional)\n Name of the forcefield used. Switches between different\n :attr:`~HydrogenBondAnalysis.DEFAULT_DONORS` and\n :attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS` values.\n ["CHARMM27"]\n donors : sequence (optional)\n Extra H donor atom types (in addition to those in\n :attr:`~HydrogenBondAnalysis.DEFAULT_DONORS`), must be a sequence.\n acceptors : sequence (optional)\n Extra H acceptor atom types (in addition to those in\n :attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS`), must be a\n sequence.\n distance_type : {"hydrogen", "heavy"} (optional)\n Measure hydrogen bond lengths between donor and acceptor heavy\n atoms ("heavy") or between donor hydrogen and acceptor heavy\n atom ("hydrogen"). If using "heavy" then one should set the\n *distance* cutoff to a higher value such as 3.5 Å. ["hydrogen"]\n output_format: {"sele1_sele2", "donor_acceptor"} (optional)\n Setting the output format for timeseries and table. If set to\n "sele1_sele2", for each hydrogen bond, the one close to selection 1\n will be placed before selection 2. If set to "donor_acceptor", the\n donor will be placed before acceptor. "sele1_sele2"]\n debug : bool (optional)\n If set to ``True`` enables per-frame debug logging. This is disabled\n by default because it generates a very large amount of output in\n the log file. (Note that a logger must have been started to see\n the output, e.g. using :func:`MDAnalysis.start_logging`.)\n verbose : bool (optional)\n Toggle progress output. (Can also be given as keyword argument to\n :meth:`run`.)\n\n Notes\n -----\n In order to speed up processing, atoms are filtered by a coarse\n distance criterion before a detailed hydrogen bonding analysis is\n performed (`filter_first` = ``True``).\n\n If selection 1 and selection 2 are very mobile during the simulation\n and the contact surface is constantly changing (i.e. residues are\n moving farther than 4 Å + `order` * (2 Å + `distance`)), you might\n consider setting the `update_selection` keywords to ``True``\n to ensure correctness.\n\n .. 
versionchanged 0.20.0\n The :attr:`WaterBridgeAnalysis.timeseries` has been updated\n see :attr:`WaterBridgeAnalysis.timeseries` for detail.\n This class is now based on\n :class:`~MDAnalysis.analysis.base.AnalysisBase`.\n\n\n ' super(WaterBridgeAnalysis, self).__init__(universe.trajectory, **kwargs) self.water_selection = water_selection self.update_water_selection = update_water_selection self.debug = debug self.output_format = output_format self.u = universe self.selection1 = selection1 self.selection2 = selection2 self.selection1_type = selection1_type if (selection1 == selection2): self.selection1_type = 'donor' self.update_selection = update_selection self.filter_first = filter_first self.distance = distance self.distance_type = distance_type self.angle = angle self.pbc = (pbc and all(self.u.dimensions[:3])) self.order = order if (donors is None): donors = () if (acceptors is None): acceptors = () self.forcefield = forcefield self.donors = tuple(set(self.DEFAULT_DONORS[forcefield]).union(donors)) self.acceptors = tuple(set(self.DEFAULT_ACCEPTORS[forcefield]).union(acceptors)) if (self.selection1_type not in ('both', 'donor', 'acceptor')): raise ValueError('HydrogenBondAnalysis: Invalid selection type {0!s}'.format(self.selection1_type)) self._network = [] self.timesteps = None self._log_parameters()
Set up the calculation of water bridges between two selections in a universe. The timeseries is accessible as the attribute :attr:`WaterBridgeAnalysis.timeseries`. If no hydrogen bonds are detected or if the initial check fails, look at the log output (enable with :func:`MDAnalysis.start_logging` and set `verbose` ``=True``). It is likely that the default names for donors and acceptors are not suitable (especially for non-standard ligands). In this case, either change the `forcefield` or use customized `donors` and/or `acceptors`. Parameters ---------- universe : Universe Universe object selection1 : str (optional) Selection string for first selection ['protein'] selection2 : str (optional) Selection string for second selection ['not resname SOL'] This string selects everything except water where water is assumed to have a residue name as SOL. water_selection : str (optional) Selection string for bridging water selection ['resname SOL'] The default selection assumes that the water molecules have residue name "SOL". Change it to the appropriate selection for your specific force field. However, in theory, this selection can be anything which forms a hydrogen bond with selection 1 and selection 2. order : int (optional) The maximum number of water bridges linking both selections. if the order is set to 3, then all the residues linked with less than three water molecules will be detected. [1] Computation of high order water bridges can be very time-consuming. Think carefully before running the calculation, do you really want to compute the 20th order water bridge between domain A and domain B or you just want to know the third order water bridge between two residues. selection1_type : {"donor", "acceptor", "both"} (optional) Selection 1 can be 'donor', 'acceptor' or 'both'. Note that the value for `selection1_type` automatically determines how `selection2` handles donors and acceptors: If `selection1` contains 'both' then `selection2` will also contain 'both'. If `selection1` is set to 'donor' then `selection2` is 'acceptor' (and vice versa). ['both']. update_selection : bool (optional) Update selection 1 and 2 at each frame. Setting to ``True`` if the selection is not static. Selections are filtered first to speed up performance. Thus, setting to ``True`` is recommended if contact surface between selection 1 and selection 2 is constantly changing. [``False``] update_water_selection : bool (optional) Update selection of water at each frame. Setting to ``False`` is **only** recommended when the total amount of water molecules in the simulation are small and when water molecules remain static across the simulation. However, in normal simulations, only a tiny proportion of water is engaged in the formation of water bridge. It is recommended to update the water selection and set keyword `filter_first` to ``True`` so as to filter out water not residing between the two selections. [``True``] filter_first : bool (optional) Filter the water selection to only include water within 4 Å + `order` * (2 Å + `distance`) away from `both` selection 1 and selection 2. Selection 1 and selection 2 are both filtered to only include atoms with the same distance away from the other selection. [``True``] distance : float (optional) Distance cutoff for hydrogen bonds; only interactions with a H-A distance <= `distance` (and the appropriate D-H-A angle, see `angle`) are recorded. (Note: `distance_type` can change this to the D-A distance.) 
[3.0] angle : float (optional) Angle cutoff for hydrogen bonds; an ideal H-bond has an angle of 180º. A hydrogen bond is only recorded if the D-H-A angle is >= `angle`. The default of 120º also finds fairly non-specific hydrogen interactions and possibly better value is 150º. [120.0] forcefield : {"CHARMM27", "GLYCAM06", "other"} (optional) Name of the forcefield used. Switches between different :attr:`~HydrogenBondAnalysis.DEFAULT_DONORS` and :attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS` values. ["CHARMM27"] donors : sequence (optional) Extra H donor atom types (in addition to those in :attr:`~HydrogenBondAnalysis.DEFAULT_DONORS`), must be a sequence. acceptors : sequence (optional) Extra H acceptor atom types (in addition to those in :attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS`), must be a sequence. distance_type : {"hydrogen", "heavy"} (optional) Measure hydrogen bond lengths between donor and acceptor heavy atoms ("heavy") or between donor hydrogen and acceptor heavy atom ("hydrogen"). If using "heavy" then one should set the *distance* cutoff to a higher value such as 3.5 Å. ["hydrogen"] output_format: {"sele1_sele2", "donor_acceptor"} (optional) Setting the output format for timeseries and table. If set to "sele1_sele2", for each hydrogen bond, the one close to selection 1 will be placed before selection 2. If set to "donor_acceptor", the donor will be placed before acceptor. "sele1_sele2"] debug : bool (optional) If set to ``True`` enables per-frame debug logging. This is disabled by default because it generates a very large amount of output in the log file. (Note that a logger must have been started to see the output, e.g. using :func:`MDAnalysis.start_logging`.) verbose : bool (optional) Toggle progress output. (Can also be given as keyword argument to :meth:`run`.) Notes ----- In order to speed up processing, atoms are filtered by a coarse distance criterion before a detailed hydrogen bonding analysis is performed (`filter_first` = ``True``). If selection 1 and selection 2 are very mobile during the simulation and the contact surface is constantly changing (i.e. residues are moving farther than 4 Å + `order` * (2 Å + `distance`)), you might consider setting the `update_selection` keywords to ``True`` to ensure correctness. .. versionchanged 0.20.0 The :attr:`WaterBridgeAnalysis.timeseries` has been updated see :attr:`WaterBridgeAnalysis.timeseries` for detail. This class is now based on :class:`~MDAnalysis.analysis.base.AnalysisBase`.
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
__init__
dtklinh/GBRDE
2
python
def __init__(self, universe, selection1='protein', selection2='not resname SOL', water_selection='resname SOL', order=1, selection1_type='both', update_selection=False, update_water_selection=True, filter_first=True, distance_type='hydrogen', distance=3.0, angle=120.0, forcefield='CHARMM27', donors=None, acceptors=None, output_format='sele1_sele2', debug=None, verbose=False, pbc=False, **kwargs): 'Set up the calculation of water bridges between two selections in a\n universe.\n\n The timeseries is accessible as the attribute\n :attr:`WaterBridgeAnalysis.timeseries`.\n\n If no hydrogen bonds are detected or if the initial check fails, look\n at the log output (enable with :func:`MDAnalysis.start_logging` and set\n `verbose` ``=True``). It is likely that the default names for donors\n and acceptors are not suitable (especially for non-standard\n ligands). In this case, either change the `forcefield` or use\n customized `donors` and/or `acceptors`.\n\n Parameters\n ----------\n universe : Universe\n Universe object\n selection1 : str (optional)\n Selection string for first selection [\'protein\']\n selection2 : str (optional)\n Selection string for second selection [\'not resname SOL\']\n This string selects everything except water where water is assumed\n to have a residue name as SOL.\n water_selection : str (optional)\n Selection string for bridging water selection [\'resname SOL\']\n The default selection assumes that the water molecules have residue\n name "SOL". Change it to the appropriate selection for your\n specific force field.\n\n However, in theory, this selection can be anything which forms\n a hydrogen bond with selection 1 and selection 2.\n order : int (optional)\n The maximum number of water bridges linking both selections.\n if the order is set to 3, then all the residues linked with less than\n three water molecules will be detected. [1]\n\n Computation of high order water bridges can be very time-consuming.\n Think carefully before running the calculation, do you really want\n to compute the 20th order water bridge between domain A and domain B\n or you just want to know the third order water bridge between two residues.\n selection1_type : {"donor", "acceptor", "both"} (optional)\n Selection 1 can be \'donor\', \'acceptor\' or \'both\'. Note that the\n value for `selection1_type` automatically determines how\n `selection2` handles donors and acceptors: If `selection1` contains\n \'both\' then `selection2` will also contain \'both\'. If `selection1`\n is set to \'donor\' then `selection2` is \'acceptor\' (and vice versa).\n [\'both\'].\n update_selection : bool (optional)\n Update selection 1 and 2 at each frame. Setting to ``True`` if the\n selection is not static. Selections are filtered first to speed up\n performance. Thus, setting to ``True`` is recommended if contact\n surface between selection 1 and selection 2 is constantly\n changing. [``False``]\n update_water_selection : bool (optional)\n Update selection of water at each frame. Setting to ``False`` is\n **only** recommended when the total amount of water molecules in the\n simulation are small and when water molecules remain static across\n the simulation.\n\n However, in normal simulations, only a tiny proportion of water is\n engaged in the formation of water bridge. It is recommended to\n update the water selection and set keyword `filter_first` to\n ``True`` so as to filter out water not residing between the two\n selections. 
[``True``]\n filter_first : bool (optional)\n Filter the water selection to only include water within 4 Å + `order` *\n (2 Å + `distance`) away from `both` selection 1 and selection 2.\n Selection 1 and selection 2 are both filtered to only include atoms\n with the same distance away from the other selection. [``True``]\n distance : float (optional)\n Distance cutoff for hydrogen bonds; only interactions with a H-A\n distance <= `distance` (and the appropriate D-H-A angle, see\n `angle`) are recorded. (Note: `distance_type` can change this to\n the D-A distance.) [3.0]\n angle : float (optional)\n Angle cutoff for hydrogen bonds; an ideal H-bond has an angle of\n 180º. A hydrogen bond is only recorded if the D-H-A angle is\n >= `angle`. The default of 120º also finds fairly non-specific\n hydrogen interactions and possibly better value is 150º. [120.0]\n forcefield : {"CHARMM27", "GLYCAM06", "other"} (optional)\n Name of the forcefield used. Switches between different\n :attr:`~HydrogenBondAnalysis.DEFAULT_DONORS` and\n :attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS` values.\n ["CHARMM27"]\n donors : sequence (optional)\n Extra H donor atom types (in addition to those in\n :attr:`~HydrogenBondAnalysis.DEFAULT_DONORS`), must be a sequence.\n acceptors : sequence (optional)\n Extra H acceptor atom types (in addition to those in\n :attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS`), must be a\n sequence.\n distance_type : {"hydrogen", "heavy"} (optional)\n Measure hydrogen bond lengths between donor and acceptor heavy\n atoms ("heavy") or between donor hydrogen and acceptor heavy\n atom ("hydrogen"). If using "heavy" then one should set the\n *distance* cutoff to a higher value such as 3.5 Å. ["hydrogen"]\n output_format: {"sele1_sele2", "donor_acceptor"} (optional)\n Setting the output format for timeseries and table. If set to\n "sele1_sele2", for each hydrogen bond, the one close to selection 1\n will be placed before selection 2. If set to "donor_acceptor", the\n donor will be placed before acceptor. "sele1_sele2"]\n debug : bool (optional)\n If set to ``True`` enables per-frame debug logging. This is disabled\n by default because it generates a very large amount of output in\n the log file. (Note that a logger must have been started to see\n the output, e.g. using :func:`MDAnalysis.start_logging`.)\n verbose : bool (optional)\n Toggle progress output. (Can also be given as keyword argument to\n :meth:`run`.)\n\n Notes\n -----\n In order to speed up processing, atoms are filtered by a coarse\n distance criterion before a detailed hydrogen bonding analysis is\n performed (`filter_first` = ``True``).\n\n If selection 1 and selection 2 are very mobile during the simulation\n and the contact surface is constantly changing (i.e. residues are\n moving farther than 4 Å + `order` * (2 Å + `distance`)), you might\n consider setting the `update_selection` keywords to ``True``\n to ensure correctness.\n\n .. 
versionchanged 0.20.0\n The :attr:`WaterBridgeAnalysis.timeseries` has been updated\n see :attr:`WaterBridgeAnalysis.timeseries` for detail.\n This class is now based on\n :class:`~MDAnalysis.analysis.base.AnalysisBase`.\n\n\n ' super(WaterBridgeAnalysis, self).__init__(universe.trajectory, **kwargs) self.water_selection = water_selection self.update_water_selection = update_water_selection self.debug = debug self.output_format = output_format self.u = universe self.selection1 = selection1 self.selection2 = selection2 self.selection1_type = selection1_type if (selection1 == selection2): self.selection1_type = 'donor' self.update_selection = update_selection self.filter_first = filter_first self.distance = distance self.distance_type = distance_type self.angle = angle self.pbc = (pbc and all(self.u.dimensions[:3])) self.order = order if (donors is None): donors = () if (acceptors is None): acceptors = () self.forcefield = forcefield self.donors = tuple(set(self.DEFAULT_DONORS[forcefield]).union(donors)) self.acceptors = tuple(set(self.DEFAULT_ACCEPTORS[forcefield]).union(acceptors)) if (self.selection1_type not in ('both', 'donor', 'acceptor')): raise ValueError('HydrogenBondAnalysis: Invalid selection type {0!s}'.format(self.selection1_type)) self._network = [] self.timesteps = None self._log_parameters()
def __init__(self, universe, selection1='protein', selection2='not resname SOL', water_selection='resname SOL', order=1, selection1_type='both', update_selection=False, update_water_selection=True, filter_first=True, distance_type='hydrogen', distance=3.0, angle=120.0, forcefield='CHARMM27', donors=None, acceptors=None, output_format='sele1_sele2', debug=None, verbose=False, pbc=False, **kwargs): 'Set up the calculation of water bridges between two selections in a\n universe.\n\n The timeseries is accessible as the attribute\n :attr:`WaterBridgeAnalysis.timeseries`.\n\n If no hydrogen bonds are detected or if the initial check fails, look\n at the log output (enable with :func:`MDAnalysis.start_logging` and set\n `verbose` ``=True``). It is likely that the default names for donors\n and acceptors are not suitable (especially for non-standard\n ligands). In this case, either change the `forcefield` or use\n customized `donors` and/or `acceptors`.\n\n Parameters\n ----------\n universe : Universe\n Universe object\n selection1 : str (optional)\n Selection string for first selection [\'protein\']\n selection2 : str (optional)\n Selection string for second selection [\'not resname SOL\']\n This string selects everything except water where water is assumed\n to have a residue name as SOL.\n water_selection : str (optional)\n Selection string for bridging water selection [\'resname SOL\']\n The default selection assumes that the water molecules have residue\n name "SOL". Change it to the appropriate selection for your\n specific force field.\n\n However, in theory, this selection can be anything which forms\n a hydrogen bond with selection 1 and selection 2.\n order : int (optional)\n The maximum number of water bridges linking both selections.\n if the order is set to 3, then all the residues linked with less than\n three water molecules will be detected. [1]\n\n Computation of high order water bridges can be very time-consuming.\n Think carefully before running the calculation, do you really want\n to compute the 20th order water bridge between domain A and domain B\n or you just want to know the third order water bridge between two residues.\n selection1_type : {"donor", "acceptor", "both"} (optional)\n Selection 1 can be \'donor\', \'acceptor\' or \'both\'. Note that the\n value for `selection1_type` automatically determines how\n `selection2` handles donors and acceptors: If `selection1` contains\n \'both\' then `selection2` will also contain \'both\'. If `selection1`\n is set to \'donor\' then `selection2` is \'acceptor\' (and vice versa).\n [\'both\'].\n update_selection : bool (optional)\n Update selection 1 and 2 at each frame. Setting to ``True`` if the\n selection is not static. Selections are filtered first to speed up\n performance. Thus, setting to ``True`` is recommended if contact\n surface between selection 1 and selection 2 is constantly\n changing. [``False``]\n update_water_selection : bool (optional)\n Update selection of water at each frame. Setting to ``False`` is\n **only** recommended when the total amount of water molecules in the\n simulation are small and when water molecules remain static across\n the simulation.\n\n However, in normal simulations, only a tiny proportion of water is\n engaged in the formation of water bridge. It is recommended to\n update the water selection and set keyword `filter_first` to\n ``True`` so as to filter out water not residing between the two\n selections. 
[``True``]\n filter_first : bool (optional)\n Filter the water selection to only include water within 4 Å + `order` *\n (2 Å + `distance`) away from `both` selection 1 and selection 2.\n Selection 1 and selection 2 are both filtered to only include atoms\n with the same distance away from the other selection. [``True``]\n distance : float (optional)\n Distance cutoff for hydrogen bonds; only interactions with a H-A\n distance <= `distance` (and the appropriate D-H-A angle, see\n `angle`) are recorded. (Note: `distance_type` can change this to\n the D-A distance.) [3.0]\n angle : float (optional)\n Angle cutoff for hydrogen bonds; an ideal H-bond has an angle of\n 180º. A hydrogen bond is only recorded if the D-H-A angle is\n >= `angle`. The default of 120º also finds fairly non-specific\n hydrogen interactions and possibly better value is 150º. [120.0]\n forcefield : {"CHARMM27", "GLYCAM06", "other"} (optional)\n Name of the forcefield used. Switches between different\n :attr:`~HydrogenBondAnalysis.DEFAULT_DONORS` and\n :attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS` values.\n ["CHARMM27"]\n donors : sequence (optional)\n Extra H donor atom types (in addition to those in\n :attr:`~HydrogenBondAnalysis.DEFAULT_DONORS`), must be a sequence.\n acceptors : sequence (optional)\n Extra H acceptor atom types (in addition to those in\n :attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS`), must be a\n sequence.\n distance_type : {"hydrogen", "heavy"} (optional)\n Measure hydrogen bond lengths between donor and acceptor heavy\n atoms ("heavy") or between donor hydrogen and acceptor heavy\n atom ("hydrogen"). If using "heavy" then one should set the\n *distance* cutoff to a higher value such as 3.5 Å. ["hydrogen"]\n output_format: {"sele1_sele2", "donor_acceptor"} (optional)\n Setting the output format for timeseries and table. If set to\n "sele1_sele2", for each hydrogen bond, the one close to selection 1\n will be placed before selection 2. If set to "donor_acceptor", the\n donor will be placed before acceptor. "sele1_sele2"]\n debug : bool (optional)\n If set to ``True`` enables per-frame debug logging. This is disabled\n by default because it generates a very large amount of output in\n the log file. (Note that a logger must have been started to see\n the output, e.g. using :func:`MDAnalysis.start_logging`.)\n verbose : bool (optional)\n Toggle progress output. (Can also be given as keyword argument to\n :meth:`run`.)\n\n Notes\n -----\n In order to speed up processing, atoms are filtered by a coarse\n distance criterion before a detailed hydrogen bonding analysis is\n performed (`filter_first` = ``True``).\n\n If selection 1 and selection 2 are very mobile during the simulation\n and the contact surface is constantly changing (i.e. residues are\n moving farther than 4 Å + `order` * (2 Å + `distance`)), you might\n consider setting the `update_selection` keywords to ``True``\n to ensure correctness.\n\n .. 
versionchanged 0.20.0\n The :attr:`WaterBridgeAnalysis.timeseries` has been updated\n see :attr:`WaterBridgeAnalysis.timeseries` for detail.\n This class is now based on\n :class:`~MDAnalysis.analysis.base.AnalysisBase`.\n\n\n ' super(WaterBridgeAnalysis, self).__init__(universe.trajectory, **kwargs) self.water_selection = water_selection self.update_water_selection = update_water_selection self.debug = debug self.output_format = output_format self.u = universe self.selection1 = selection1 self.selection2 = selection2 self.selection1_type = selection1_type if (selection1 == selection2): self.selection1_type = 'donor' self.update_selection = update_selection self.filter_first = filter_first self.distance = distance self.distance_type = distance_type self.angle = angle self.pbc = (pbc and all(self.u.dimensions[:3])) self.order = order if (donors is None): donors = () if (acceptors is None): acceptors = () self.forcefield = forcefield self.donors = tuple(set(self.DEFAULT_DONORS[forcefield]).union(donors)) self.acceptors = tuple(set(self.DEFAULT_ACCEPTORS[forcefield]).union(acceptors)) if (self.selection1_type not in ('both', 'donor', 'acceptor')): raise ValueError('HydrogenBondAnalysis: Invalid selection type {0!s}'.format(self.selection1_type)) self._network = [] self.timesteps = None self._log_parameters()<|docstring|>Set up the calculation of water bridges between two selections in a universe. The timeseries is accessible as the attribute :attr:`WaterBridgeAnalysis.timeseries`. If no hydrogen bonds are detected or if the initial check fails, look at the log output (enable with :func:`MDAnalysis.start_logging` and set `verbose` ``=True``). It is likely that the default names for donors and acceptors are not suitable (especially for non-standard ligands). In this case, either change the `forcefield` or use customized `donors` and/or `acceptors`. Parameters ---------- universe : Universe Universe object selection1 : str (optional) Selection string for first selection ['protein'] selection2 : str (optional) Selection string for second selection ['not resname SOL'] This string selects everything except water where water is assumed to have a residue name as SOL. water_selection : str (optional) Selection string for bridging water selection ['resname SOL'] The default selection assumes that the water molecules have residue name "SOL". Change it to the appropriate selection for your specific force field. However, in theory, this selection can be anything which forms a hydrogen bond with selection 1 and selection 2. order : int (optional) The maximum number of water bridges linking both selections. if the order is set to 3, then all the residues linked with less than three water molecules will be detected. [1] Computation of high order water bridges can be very time-consuming. Think carefully before running the calculation, do you really want to compute the 20th order water bridge between domain A and domain B or you just want to know the third order water bridge between two residues. selection1_type : {"donor", "acceptor", "both"} (optional) Selection 1 can be 'donor', 'acceptor' or 'both'. Note that the value for `selection1_type` automatically determines how `selection2` handles donors and acceptors: If `selection1` contains 'both' then `selection2` will also contain 'both'. If `selection1` is set to 'donor' then `selection2` is 'acceptor' (and vice versa). ['both']. update_selection : bool (optional) Update selection 1 and 2 at each frame. Setting to ``True`` if the selection is not static. 
Selections are filtered first to speed up performance. Thus, setting to ``True`` is recommended if contact surface between selection 1 and selection 2 is constantly changing. [``False``] update_water_selection : bool (optional) Update selection of water at each frame. Setting to ``False`` is **only** recommended when the total amount of water molecules in the simulation are small and when water molecules remain static across the simulation. However, in normal simulations, only a tiny proportion of water is engaged in the formation of water bridge. It is recommended to update the water selection and set keyword `filter_first` to ``True`` so as to filter out water not residing between the two selections. [``True``] filter_first : bool (optional) Filter the water selection to only include water within 4 Å + `order` * (2 Å + `distance`) away from `both` selection 1 and selection 2. Selection 1 and selection 2 are both filtered to only include atoms with the same distance away from the other selection. [``True``] distance : float (optional) Distance cutoff for hydrogen bonds; only interactions with a H-A distance <= `distance` (and the appropriate D-H-A angle, see `angle`) are recorded. (Note: `distance_type` can change this to the D-A distance.) [3.0] angle : float (optional) Angle cutoff for hydrogen bonds; an ideal H-bond has an angle of 180º. A hydrogen bond is only recorded if the D-H-A angle is >= `angle`. The default of 120º also finds fairly non-specific hydrogen interactions and possibly better value is 150º. [120.0] forcefield : {"CHARMM27", "GLYCAM06", "other"} (optional) Name of the forcefield used. Switches between different :attr:`~HydrogenBondAnalysis.DEFAULT_DONORS` and :attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS` values. ["CHARMM27"] donors : sequence (optional) Extra H donor atom types (in addition to those in :attr:`~HydrogenBondAnalysis.DEFAULT_DONORS`), must be a sequence. acceptors : sequence (optional) Extra H acceptor atom types (in addition to those in :attr:`~HydrogenBondAnalysis.DEFAULT_ACCEPTORS`), must be a sequence. distance_type : {"hydrogen", "heavy"} (optional) Measure hydrogen bond lengths between donor and acceptor heavy atoms ("heavy") or between donor hydrogen and acceptor heavy atom ("hydrogen"). If using "heavy" then one should set the *distance* cutoff to a higher value such as 3.5 Å. ["hydrogen"] output_format: {"sele1_sele2", "donor_acceptor"} (optional) Setting the output format for timeseries and table. If set to "sele1_sele2", for each hydrogen bond, the one close to selection 1 will be placed before selection 2. If set to "donor_acceptor", the donor will be placed before acceptor. "sele1_sele2"] debug : bool (optional) If set to ``True`` enables per-frame debug logging. This is disabled by default because it generates a very large amount of output in the log file. (Note that a logger must have been started to see the output, e.g. using :func:`MDAnalysis.start_logging`.) verbose : bool (optional) Toggle progress output. (Can also be given as keyword argument to :meth:`run`.) Notes ----- In order to speed up processing, atoms are filtered by a coarse distance criterion before a detailed hydrogen bonding analysis is performed (`filter_first` = ``True``). If selection 1 and selection 2 are very mobile during the simulation and the contact surface is constantly changing (i.e. residues are moving farther than 4 Å + `order` * (2 Å + `distance`)), you might consider setting the `update_selection` keywords to ``True`` to ensure correctness. .. 
versionchanged 0.20.0 The :attr:`WaterBridgeAnalysis.timeseries` has been updated see :attr:`WaterBridgeAnalysis.timeseries` for detail. This class is now based on :class:`~MDAnalysis.analysis.base.AnalysisBase`.<|endoftext|>
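The record above documents the `WaterBridgeAnalysis` constructor. A minimal usage sketch based on the parameters described in its docstring; the topology/trajectory file names are placeholders and the import path follows the module location given in the `path` field::

    import MDAnalysis as mda
    from MDAnalysis.analysis.hbonds.wbridge_analysis import WaterBridgeAnalysis

    u = mda.Universe('topol.tpr', 'traj.xtc')      # placeholder input files
    w = WaterBridgeAnalysis(u,
                            selection1='protein',
                            selection2='not resname SOL',
                            water_selection='resname SOL',
                            order=1,               # first-order bridges only
                            distance=3.0,
                            angle=120.0)
    w.run()                                        # populate the per-frame network

The same `w` object is reused in the usage sketches after the later records.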
6ae9432b0354264dc7c821cbfb882f5838fdeaa781664346855f987885f4864b
def _log_parameters(self): 'Log important parameters to the logfile.' logger.info('WaterBridgeAnalysis: selection = %r (update: %r)', self.selection2, self.update_selection) logger.info('WaterBridgeAnalysis: water selection = %r (update: %r)', self.water_selection, self.update_water_selection) logger.info('WaterBridgeAnalysis: criterion: donor %s atom and acceptor atom distance <= %.3f A', self.distance_type, self.distance) logger.info('WaterBridgeAnalysis: criterion: angle D-H-A >= %.3f degrees', self.angle) logger.info('WaterBridgeAnalysis: force field %s to guess donor and acceptor names', self.forcefield)
Log important parameters to the logfile.
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
_log_parameters
dtklinh/GBRDE
2
python
def _log_parameters(self): logger.info('WaterBridgeAnalysis: selection = %r (update: %r)', self.selection2, self.update_selection) logger.info('WaterBridgeAnalysis: water selection = %r (update: %r)', self.water_selection, self.update_water_selection) logger.info('WaterBridgeAnalysis: criterion: donor %s atom and acceptor atom distance <= %.3f A', self.distance_type, self.distance) logger.info('WaterBridgeAnalysis: criterion: angle D-H-A >= %.3f degrees', self.angle) logger.info('WaterBridgeAnalysis: force field %s to guess donor and acceptor names', self.forcefield)
def _log_parameters(self): logger.info('WaterBridgeAnalysis: selection = %r (update: %r)', self.selection2, self.update_selection) logger.info('WaterBridgeAnalysis: water selection = %r (update: %r)', self.water_selection, self.update_water_selection) logger.info('WaterBridgeAnalysis: criterion: donor %s atom and acceptor atom distance <= %.3f A', self.distance_type, self.distance) logger.info('WaterBridgeAnalysis: criterion: angle D-H-A >= %.3f degrees', self.angle) logger.info('WaterBridgeAnalysis: force field %s to guess donor and acceptor names', self.forcefield)<|docstring|>Log important parameters to the logfile.<|endoftext|>
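`_log_parameters` only writes the run parameters to the MDAnalysis logger, so its output is visible only when logging has been started. A short sketch using the :func:`MDAnalysis.start_logging` helper that the docstrings above reference::

    import MDAnalysis
    MDAnalysis.start_logging()                     # send MDAnalysis log records to a file
    w = WaterBridgeAnalysis(u, 'protein', 'not resname SOL', debug=True)
    # the constructor calls self._log_parameters(), so the selections,
    # distance/angle cutoffs and force field now appear in the log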
c13b3d608a7b41bb05165e9ce9c6e91753a57f7872e4f795e8cb62121ab449cc
def _get_bonded_hydrogens(self, atom): 'Find hydrogens bonded within cutoff to `atom`.\n\n Hydrogens are detected by either name ("H*", "[123]H*") or type ("H");\n this is not fool-proof as the atom type is not always a character but\n the name pattern should catch most typical occurrences.\n\n The distance from `atom` is calculated for all hydrogens in the residue\n and only those within a cutoff are kept. The cutoff depends on the\n heavy atom (more precisely, on its element, which is taken as the first\n letter of its name ``atom.name[0]``) and is parameterized in\n :attr:`HydrogenBondAnalysis.r_cov`. If no match is found then the\n default of 1.5 Å is used.\n\n\n Parameters\n ----------\n atom : groups.Atom\n heavy atom\n\n Returns\n -------\n hydrogen_atoms : AtomGroup or []\n list of hydrogens (can be a :class:`~MDAnalysis.core.groups.AtomGroup`)\n or empty list ``[]`` if none were found.\n ' try: return atom.residue.atoms.select_atoms('(name H* 1H* 2H* 3H* or type H) and around {0:f} name {1!s}'.format(self.r_cov[atom.name[0]], atom.name)) except NoDataError: return []
Find hydrogens bonded within cutoff to `atom`. Hydrogens are detected by either name ("H*", "[123]H*") or type ("H"); this is not fool-proof as the atom type is not always a character but the name pattern should catch most typical occurrences. The distance from `atom` is calculated for all hydrogens in the residue and only those within a cutoff are kept. The cutoff depends on the heavy atom (more precisely, on its element, which is taken as the first letter of its name ``atom.name[0]``) and is parameterized in :attr:`HydrogenBondAnalysis.r_cov`. If no match is found then the default of 1.5 Å is used. Parameters ---------- atom : groups.Atom heavy atom Returns ------- hydrogen_atoms : AtomGroup or [] list of hydrogens (can be a :class:`~MDAnalysis.core.groups.AtomGroup`) or empty list ``[]`` if none were found.
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
_get_bonded_hydrogens
dtklinh/GBRDE
2
python
def _get_bonded_hydrogens(self, atom): 'Find hydrogens bonded within cutoff to `atom`.\n\n Hydrogens are detected by either name ("H*", "[123]H*") or type ("H");\n this is not fool-proof as the atom type is not always a character but\n the name pattern should catch most typical occurrences.\n\n The distance from `atom` is calculated for all hydrogens in the residue\n and only those within a cutoff are kept. The cutoff depends on the\n heavy atom (more precisely, on its element, which is taken as the first\n letter of its name ``atom.name[0]``) and is parameterized in\n :attr:`HydrogenBondAnalysis.r_cov`. If no match is found then the\n default of 1.5 Å is used.\n\n\n Parameters\n ----------\n atom : groups.Atom\n heavy atom\n\n Returns\n -------\n hydrogen_atoms : AtomGroup or []\n list of hydrogens (can be a :class:`~MDAnalysis.core.groups.AtomGroup`)\n or empty list ``[]`` if none were found.\n ' try: return atom.residue.atoms.select_atoms('(name H* 1H* 2H* 3H* or type H) and around {0:f} name {1!s}'.format(self.r_cov[atom.name[0]], atom.name)) except NoDataError: return []
def _get_bonded_hydrogens(self, atom): 'Find hydrogens bonded within cutoff to `atom`.\n\n Hydrogens are detected by either name ("H*", "[123]H*") or type ("H");\n this is not fool-proof as the atom type is not always a character but\n the name pattern should catch most typical occurrences.\n\n The distance from `atom` is calculated for all hydrogens in the residue\n and only those within a cutoff are kept. The cutoff depends on the\n heavy atom (more precisely, on its element, which is taken as the first\n letter of its name ``atom.name[0]``) and is parameterized in\n :attr:`HydrogenBondAnalysis.r_cov`. If no match is found then the\n default of 1.5 Å is used.\n\n\n Parameters\n ----------\n atom : groups.Atom\n heavy atom\n\n Returns\n -------\n hydrogen_atoms : AtomGroup or []\n list of hydrogens (can be a :class:`~MDAnalysis.core.groups.AtomGroup`)\n or empty list ``[]`` if none were found.\n ' try: return atom.residue.atoms.select_atoms('(name H* 1H* 2H* 3H* or type H) and around {0:f} name {1!s}'.format(self.r_cov[atom.name[0]], atom.name)) except NoDataError: return []<|docstring|>Find hydrogens bonded within cutoff to `atom`. Hydrogens are detected by either name ("H*", "[123]H*") or type ("H"); this is not fool-proof as the atom type is not always a character but the name pattern should catch most typical occurrences. The distance from `atom` is calculated for all hydrogens in the residue and only those within a cutoff are kept. The cutoff depends on the heavy atom (more precisely, on its element, which is taken as the first letter of its name ``atom.name[0]``) and is parameterized in :attr:`HydrogenBondAnalysis.r_cov`. If no match is found then the default of 1.5 Å is used. Parameters ---------- atom : groups.Atom heavy atom Returns ------- hydrogen_atoms : AtomGroup or [] list of hydrogens (can be a :class:`~MDAnalysis.core.groups.AtomGroup`) or empty list ``[]`` if none were found.<|endoftext|>
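The hydrogen search above is a selection-string query: residue atoms whose name or type marks them as hydrogen, lying within a per-element covalent cutoff of the heavy atom. A simplified standalone sketch of the same idea; the cutoff values are illustrative stand-ins for the class attribute `r_cov` mentioned in the docstring (default 1.5 Å)::

    from collections import defaultdict

    r_cov = defaultdict(lambda: 1.5, N=1.31, O=1.31, S=1.55)   # illustrative cutoffs (Angstrom)

    def bonded_hydrogens(atom):
        cutoff = r_cov[atom.name[0]]       # keyed on the first letter of the heavy atom name
        sel = ('(name H* 1H* 2H* 3H* or type H) and '
               'around {0:f} name {1!s}'.format(cutoff, atom.name))
        return atom.residue.atoms.select_atoms(sel)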
32e9dfa8695301fc1501d1c18349b633965435dcaf0c98f9f040b790cba552fc
def _traverse_water_network(self, graph, current, analysis_func=None, output=None, link_func=None, **kwargs): '\n This function recursively traverses the water network self._network and finds the hydrogen bonds which connect\n the current atom to the next atom. The newly found hydrogen bond will be appended to the hydrogen bonds\n connecting the selection 1 to the current atom via link_func. When selection 2 is reached, the full list of\n hydrogen bonds connecting the selection 1 to selection 2 will be fed into analysis_func, which will then modify\n the output in place.\n :param graph: The connection network describes the connection between the atoms in the water network.\n :param current: The hydrogen bonds from selection 1 until now.\n :param analysis_func: The analysis function which is called to analysis the hydrogen bonds.\n :param output: where the result is stored.\n :param link_func: The new hydrogen bonds will be appended to current.\n :param kwargs: the keywords which are passed into the analysis_func.\n :return:\n ' if (link_func is None): link_func = self._full_link if (graph is None): if (not (analysis_func is None)): analysis_func(current, output, self.u, **kwargs) elif (len(current) <= self.order): for node in graph: new = link_func(current, node) self._traverse_water_network(graph[node], new, analysis_func, output, link_func, **kwargs)
This function recursively traverses the water network self._network and finds the hydrogen bonds which connect the current atom to the next atom. The newly found hydrogen bond will be appended to the hydrogen bonds connecting selection 1 to the current atom via link_func. When selection 2 is reached, the full list of hydrogen bonds connecting selection 1 to selection 2 will be fed into analysis_func, which will then modify the output in place. :param graph: The connection network which describes the connections between the atoms in the water network. :param current: The hydrogen bonds found so far on the path from selection 1. :param analysis_func: The analysis function which is called to analyse the hydrogen bonds. :param output: Where the result is stored. :param link_func: The new hydrogen bond will be appended to current. :param kwargs: The keywords which are passed into the analysis_func. :return:
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
_traverse_water_network
dtklinh/GBRDE
2
python
def _traverse_water_network(self, graph, current, analysis_func=None, output=None, link_func=None, **kwargs): '\n This function recursively traverses the water network self._network and finds the hydrogen bonds which connect\n the current atom to the next atom. The newly found hydrogen bond will be appended to the hydrogen bonds\n connecting the selection 1 to the current atom via link_func. When selection 2 is reached, the full list of\n hydrogen bonds connecting the selection 1 to selection 2 will be fed into analysis_func, which will then modify\n the output in place.\n :param graph: The connection network describes the connection between the atoms in the water network.\n :param current: The hydrogen bonds from selection 1 until now.\n :param analysis_func: The analysis function which is called to analysis the hydrogen bonds.\n :param output: where the result is stored.\n :param link_func: The new hydrogen bonds will be appended to current.\n :param kwargs: the keywords which are passed into the analysis_func.\n :return:\n ' if (link_func is None): link_func = self._full_link if (graph is None): if (not (analysis_func is None)): analysis_func(current, output, self.u, **kwargs) elif (len(current) <= self.order): for node in graph: new = link_func(current, node) self._traverse_water_network(graph[node], new, analysis_func, output, link_func, **kwargs)
def _traverse_water_network(self, graph, current, analysis_func=None, output=None, link_func=None, **kwargs): '\n This function recursively traverses the water network self._network and finds the hydrogen bonds which connect\n the current atom to the next atom. The newly found hydrogen bond will be appended to the hydrogen bonds\n connecting the selection 1 to the current atom via link_func. When selection 2 is reached, the full list of\n hydrogen bonds connecting the selection 1 to selection 2 will be fed into analysis_func, which will then modify\n the output in place.\n :param graph: The connection network describes the connection between the atoms in the water network.\n :param current: The hydrogen bonds from selection 1 until now.\n :param analysis_func: The analysis function which is called to analysis the hydrogen bonds.\n :param output: where the result is stored.\n :param link_func: The new hydrogen bonds will be appended to current.\n :param kwargs: the keywords which are passed into the analysis_func.\n :return:\n ' if (link_func is None): link_func = self._full_link if (graph is None): if (not (analysis_func is None)): analysis_func(current, output, self.u, **kwargs) elif (len(current) <= self.order): for node in graph: new = link_func(current, node) self._traverse_water_network(graph[node], new, analysis_func, output, link_func, **kwargs)<|docstring|>This function recursively traverses the water network self._network and finds the hydrogen bonds which connect the current atom to the next atom. The newly found hydrogen bond will be appended to the hydrogen bonds connecting the selection 1 to the current atom via link_func. When selection 2 is reached, the full list of hydrogen bonds connecting the selection 1 to selection 2 will be fed into analysis_func, which will then modify the output in place. :param graph: The connection network describes the connection between the atoms in the water network. :param current: The hydrogen bonds from selection 1 until now. :param analysis_func: The analysis function which is called to analysis the hydrogen bonds. :param output: where the result is stored. :param link_func: The new hydrogen bonds will be appended to current. :param kwargs: the keywords which are passed into the analysis_func. :return:<|endoftext|>
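The traversal above walks a nested-dictionary picture of the water network: each key is a hydrogen bond, a nested dict continues the chain through water, and a ``None`` value marks arrival at selection 2. A self-contained toy version that mirrors the recursion; the graph below is invented purely for illustration::

    def traverse(graph, current, analysis_func, output, order=1):
        if graph is None:                          # reached selection 2: analyse the chain
            analysis_func(current, output)
        elif len(current) <= order:                # do not exceed the requested bridge order
            for node in graph:
                traverse(graph[node], current + [node], analysis_func, output, order)

    toy_graph = {'s1->w1': {'w1->s2': None}}       # a single first-order bridge
    found = []
    traverse(toy_graph, [], lambda chain, out: out.append(chain), found)
    # found == [['s1->w1', 'w1->s2']]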
96f39e4879e41079e30491e055fee82af3852ad9659db91dad065600f06b4388
def _expand_index(self, index): '\n Expand the index into (resname, resid, name).\n ' atom = self.u.atoms[index] return (atom.resname, atom.resid, atom.name)
Expand the index into (resname, resid, name).
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
_expand_index
dtklinh/GBRDE
2
python
def _expand_index(self, index): '\n \n ' atom = self.u.atoms[index] return (atom.resname, atom.resid, atom.name)
def _expand_index(self, index): '\n \n ' atom = self.u.atoms[index] return (atom.resname, atom.resid, atom.name)<|docstring|>Expand the index into (resname, resid, name).<|endoftext|>
6860fbd9c3a1dc72e5d92190de491334ecc4fab0c140f22961de939d136f8cb0
def _expand_timeseries(self, entry, output_format=None): "\n Expand the compact data format into the old timeseries form.\n The old is defined as the format for release up to 0.19.2.\n As is discussed in Issue #2177, the internal storage of the hydrogen\n bond information has been changed to the compact format.\n The function takes in the argument `output_format` to see which output format will be chosen.\n if `output_format` is not specified, the value will be taken from :attr:`output_format`.\n If `output_format` is 'sele1_sele2', the output will be the old water bridge analysis format::\n\n # donor from selection 1 to acceptor in selection 2\n [sele1_index, sele2_index,\n (sele1_resname, sele1_resid, sele1_name),\n (sele2_resname, sele2_resid, sele2_name), dist, angle]\n\n If `output_format` is 'donor_acceptor', the output will be the old hydrogen bond analysis format::\n\n # From donor to acceptor\n [donor_index, acceptor_index,\n (donor_resname, donor_resid, donor_name),\n (acceptor_resname, acceptor_resid, acceptor_name), dist, angle]\n " output_format = (output_format or self.output_format) (atom1, heavy_atom1, atom2, heavy_atom2, dist, angle) = entry if (output_format == 'sele1_sele2'): (atom1, atom2) = (atom1, atom2) elif (output_format == 'donor_acceptor'): if (heavy_atom1 is None): (atom1, atom2) = (atom2, atom1) else: (atom1, atom2) = (atom1, atom2) else: raise KeyError("Only 'sele1_sele2' or 'donor_acceptor' are allowed as output format") return (atom1, atom2, self._expand_index(atom1), self._expand_index(atom2), dist, angle)
Expand the compact data format into the old timeseries form. The old form is defined as the format for releases up to 0.19.2. As is discussed in Issue #2177, the internal storage of the hydrogen bond information has been changed to the compact format. The function takes the argument `output_format` to determine which output format will be chosen. If `output_format` is not specified, the value will be taken from :attr:`output_format`. If `output_format` is 'sele1_sele2', the output will be the old water bridge analysis format:: # donor from selection 1 to acceptor in selection 2 [sele1_index, sele2_index, (sele1_resname, sele1_resid, sele1_name), (sele2_resname, sele2_resid, sele2_name), dist, angle] If `output_format` is 'donor_acceptor', the output will be the old hydrogen bond analysis format:: # From donor to acceptor [donor_index, acceptor_index, (donor_resname, donor_resid, donor_name), (acceptor_resname, acceptor_resid, acceptor_name), dist, angle]
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
_expand_timeseries
dtklinh/GBRDE
2
python
def _expand_timeseries(self, entry, output_format=None): "\n Expand the compact data format into the old timeseries form.\n The old is defined as the format for release up to 0.19.2.\n As is discussed in Issue #2177, the internal storage of the hydrogen\n bond information has been changed to the compact format.\n The function takes in the argument `output_format` to see which output format will be chosen.\n if `output_format` is not specified, the value will be taken from :attr:`output_format`.\n If `output_format` is 'sele1_sele2', the output will be the old water bridge analysis format::\n\n # donor from selection 1 to acceptor in selection 2\n [sele1_index, sele2_index,\n (sele1_resname, sele1_resid, sele1_name),\n (sele2_resname, sele2_resid, sele2_name), dist, angle]\n\n If `output_format` is 'donor_acceptor', the output will be the old hydrogen bond analysis format::\n\n # From donor to acceptor\n [donor_index, acceptor_index,\n (donor_resname, donor_resid, donor_name),\n (acceptor_resname, acceptor_resid, acceptor_name), dist, angle]\n " output_format = (output_format or self.output_format) (atom1, heavy_atom1, atom2, heavy_atom2, dist, angle) = entry if (output_format == 'sele1_sele2'): (atom1, atom2) = (atom1, atom2) elif (output_format == 'donor_acceptor'): if (heavy_atom1 is None): (atom1, atom2) = (atom2, atom1) else: (atom1, atom2) = (atom1, atom2) else: raise KeyError("Only 'sele1_sele2' or 'donor_acceptor' are allowed as output format") return (atom1, atom2, self._expand_index(atom1), self._expand_index(atom2), dist, angle)
def _expand_timeseries(self, entry, output_format=None): "\n Expand the compact data format into the old timeseries form.\n The old is defined as the format for release up to 0.19.2.\n As is discussed in Issue #2177, the internal storage of the hydrogen\n bond information has been changed to the compact format.\n The function takes in the argument `output_format` to see which output format will be chosen.\n if `output_format` is not specified, the value will be taken from :attr:`output_format`.\n If `output_format` is 'sele1_sele2', the output will be the old water bridge analysis format::\n\n # donor from selection 1 to acceptor in selection 2\n [sele1_index, sele2_index,\n (sele1_resname, sele1_resid, sele1_name),\n (sele2_resname, sele2_resid, sele2_name), dist, angle]\n\n If `output_format` is 'donor_acceptor', the output will be the old hydrogen bond analysis format::\n\n # From donor to acceptor\n [donor_index, acceptor_index,\n (donor_resname, donor_resid, donor_name),\n (acceptor_resname, acceptor_resid, acceptor_name), dist, angle]\n " output_format = (output_format or self.output_format) (atom1, heavy_atom1, atom2, heavy_atom2, dist, angle) = entry if (output_format == 'sele1_sele2'): (atom1, atom2) = (atom1, atom2) elif (output_format == 'donor_acceptor'): if (heavy_atom1 is None): (atom1, atom2) = (atom2, atom1) else: (atom1, atom2) = (atom1, atom2) else: raise KeyError("Only 'sele1_sele2' or 'donor_acceptor' are allowed as output format") return (atom1, atom2, self._expand_index(atom1), self._expand_index(atom2), dist, angle)<|docstring|>Expand the compact data format into the old timeseries form. The old is defined as the format for release up to 0.19.2. As is discussed in Issue #2177, the internal storage of the hydrogen bond information has been changed to the compact format. The function takes in the argument `output_format` to see which output format will be chosen. if `output_format` is not specified, the value will be taken from :attr:`output_format`. If `output_format` is 'sele1_sele2', the output will be the old water bridge analysis format:: # donor from selection 1 to acceptor in selection 2 [sele1_index, sele2_index, (sele1_resname, sele1_resid, sele1_name), (sele2_resname, sele2_resid, sele2_name), dist, angle] If `output_format` is 'donor_acceptor', the output will be the old hydrogen bond analysis format:: # From donor to acceptor [donor_index, acceptor_index, (donor_resname, donor_resid, donor_name), (acceptor_resname, acceptor_resid, acceptor_name), dist, angle]<|endoftext|>
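A compact entry stores ``(atom1, heavy_atom1, atom2, heavy_atom2, dist, angle)``, and the only decision in the expansion is whether to swap the pair for the 'donor_acceptor' view. A stripped-down sketch of that decision with made-up indices::

    def order_pair(entry, output_format='sele1_sele2'):
        atom1, heavy_atom1, atom2, heavy_atom2, dist, angle = entry
        if output_format == 'donor_acceptor' and heavy_atom1 is None:
            # atom1 is the acceptor on the selection 1 side, so the donor goes first
            atom1, atom2 = atom2, atom1
        return atom1, atom2, dist, angle

    order_pair((10, None, 42, 41, 2.8, 150.0), 'donor_acceptor')   # -> (42, 10, 2.8, 150.0)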
6aeacba0419316ee0caf733495ab53250ffd25a1b01a76fa144f7d4ac03b4e6e
def _generate_timeseries(self, output_format=None): 'Time series of water bridges.\n\n The output is generated per frame as is explained in :ref:`wb_Analysis_Timeseries`.\n The format of output can be changed via the output_format selection.\n If ``output_format="sele1_sele2"``, the hydrogen bond forms a directional\n link from selection 1 to selection 2. If ``output_format="donor_acceptor"``,\n for each hydrogen bond, the donor is always written before the acceptor.\n\n Note\n ----\n To find an acceptor atom in :attr:`Universe.atoms` by\n *index* one would use ``u.atoms[acceptor_index]``.\n\n The :attr:`timeseries` is a managed attribute and it is generated\n from the underlying data in :attr:`_network` every time the\n attribute is accessed. It is therefore costly to call and if\n :attr:`timeseries` is needed repeatedly it is recommended that you\n assign to a variable::\n\n w = WaterBridgeAnalysis(u)\n w.run()\n timeseries = w.timeseries\n\n .. versionchanged 0.20.0\n The :attr:`WaterBridgeAnalysis.timeseries` has been updated where\n the donor and acceptor string has been changed to tuple\n (resname string, resid, name_string).\n\n\n ' output_format = (output_format or self.output_format) def analysis(current, output, *args, **kwargs): output = current timeseries = [] for frame in self._network: new_frame = [] self._traverse_water_network(frame, new_frame, analysis_func=analysis, output=new_frame, link_func=self._compact_link) timeseries.append([self._expand_timeseries(entry, output_format) for entry in new_frame]) return timeseries
Time series of water bridges. The output is generated per frame as is explained in :ref:`wb_Analysis_Timeseries`. The format of output can be changed via the output_format selection. If ``output_format="sele1_sele2"``, the hydrogen bond forms a directional link from selection 1 to selection 2. If ``output_format="donor_acceptor"``, for each hydrogen bond, the donor is always written before the acceptor. Note ---- To find an acceptor atom in :attr:`Universe.atoms` by *index* one would use ``u.atoms[acceptor_index]``. The :attr:`timeseries` is a managed attribute and it is generated from the underlying data in :attr:`_network` every time the attribute is accessed. It is therefore costly to call and if :attr:`timeseries` is needed repeatedly it is recommended that you assign to a variable:: w = WaterBridgeAnalysis(u) w.run() timeseries = w.timeseries .. versionchanged 0.20.0 The :attr:`WaterBridgeAnalysis.timeseries` has been updated where the donor and acceptor string has been changed to tuple (resname string, resid, name_string).
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
_generate_timeseries
dtklinh/GBRDE
2
python
def _generate_timeseries(self, output_format=None): 'Time series of water bridges.\n\n The output is generated per frame as is explained in :ref:`wb_Analysis_Timeseries`.\n The format of output can be changed via the output_format selection.\n If ``output_format="sele1_sele2"``, the hydrogen bond forms a directional\n link from selection 1 to selection 2. If ``output_format="donor_acceptor"``,\n for each hydrogen bond, the donor is always written before the acceptor.\n\n Note\n ----\n To find an acceptor atom in :attr:`Universe.atoms` by\n *index* one would use ``u.atoms[acceptor_index]``.\n\n The :attr:`timeseries` is a managed attribute and it is generated\n from the underlying data in :attr:`_network` every time the\n attribute is accessed. It is therefore costly to call and if\n :attr:`timeseries` is needed repeatedly it is recommended that you\n assign to a variable::\n\n w = WaterBridgeAnalysis(u)\n w.run()\n timeseries = w.timeseries\n\n .. versionchanged 0.20.0\n The :attr:`WaterBridgeAnalysis.timeseries` has been updated where\n the donor and acceptor string has been changed to tuple\n (resname string, resid, name_string).\n\n\n ' output_format = (output_format or self.output_format) def analysis(current, output, *args, **kwargs): output = current timeseries = [] for frame in self._network: new_frame = [] self._traverse_water_network(frame, new_frame, analysis_func=analysis, output=new_frame, link_func=self._compact_link) timeseries.append([self._expand_timeseries(entry, output_format) for entry in new_frame]) return timeseries
def _generate_timeseries(self, output_format=None): 'Time series of water bridges.\n\n The output is generated per frame as is explained in :ref:`wb_Analysis_Timeseries`.\n The format of output can be changed via the output_format selection.\n If ``output_format="sele1_sele2"``, the hydrogen bond forms a directional\n link from selection 1 to selection 2. If ``output_format="donor_acceptor"``,\n for each hydrogen bond, the donor is always written before the acceptor.\n\n Note\n ----\n To find an acceptor atom in :attr:`Universe.atoms` by\n *index* one would use ``u.atoms[acceptor_index]``.\n\n The :attr:`timeseries` is a managed attribute and it is generated\n from the underlying data in :attr:`_network` every time the\n attribute is accessed. It is therefore costly to call and if\n :attr:`timeseries` is needed repeatedly it is recommended that you\n assign to a variable::\n\n w = WaterBridgeAnalysis(u)\n w.run()\n timeseries = w.timeseries\n\n .. versionchanged 0.20.0\n The :attr:`WaterBridgeAnalysis.timeseries` has been updated where\n the donor and acceptor string has been changed to tuple\n (resname string, resid, name_string).\n\n\n ' output_format = (output_format or self.output_format) def analysis(current, output, *args, **kwargs): output = current timeseries = [] for frame in self._network: new_frame = [] self._traverse_water_network(frame, new_frame, analysis_func=analysis, output=new_frame, link_func=self._compact_link) timeseries.append([self._expand_timeseries(entry, output_format) for entry in new_frame]) return timeseries<|docstring|>Time series of water bridges. The output is generated per frame as is explained in :ref:`wb_Analysis_Timeseries`. The format of output can be changed via the output_format selection. If ``output_format="sele1_sele2"``, the hydrogen bond forms a directional link from selection 1 to selection 2. If ``output_format="donor_acceptor"``, for each hydrogen bond, the donor is always written before the acceptor. Note ---- To find an acceptor atom in :attr:`Universe.atoms` by *index* one would use ``u.atoms[acceptor_index]``. The :attr:`timeseries` is a managed attribute and it is generated from the underlying data in :attr:`_network` every time the attribute is accessed. It is therefore costly to call and if :attr:`timeseries` is needed repeatedly it is recommended that you assign to a variable:: w = WaterBridgeAnalysis(u) w.run() timeseries = w.timeseries .. versionchanged 0.20.0 The :attr:`WaterBridgeAnalysis.timeseries` has been updated where the donor and acceptor string has been changed to tuple (resname string, resid, name_string).<|endoftext|>
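The docstring already gives the recommended access pattern; spelled out a little further, and caching the property because it is rebuilt from `_network` on every access (`u` and `w` as in the constructor sketch above)::

    w.run()
    timeseries = w.timeseries                      # cache it once
    for frame_bonds in timeseries:
        for a1, a2, info1, info2, dist, angle in frame_bonds:
            print(info1, '->', info2, round(dist, 2), round(angle, 1))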
dfb21e365327fabf426d293f0e454d51f15cedfc64911cd7541d351a151f420a
def _get_network(self): 'Network representation of the water network.\n\n The output is generated per frame as is explained in :ref:`wb_Analysis_Network`.\n Each hydrogen bond has a compact representation of ::\n\n [sele1_acceptor_idx, None, sele2_donor_idx, donor_heavy_idx, distance, angle]\n\n or ::\n\n [sele1_donor_idx, donor_heavy_idx, sele1_acceptor_idx, None, distance, angle]\n\n The donor_heavy_idx is the heavy atom bonding to the proton and atoms\n can be retrived from the universe::\n\n atom = u.atoms[idx]\n\n .. versionadded:: 0.20.0\n\n ' return self._network
Network representation of the water network. The output is generated per frame as is explained in :ref:`wb_Analysis_Network`. Each hydrogen bond has a compact representation of :: [sele1_acceptor_idx, None, sele2_donor_idx, donor_heavy_idx, distance, angle] or :: [sele1_donor_idx, donor_heavy_idx, sele1_acceptor_idx, None, distance, angle] The donor_heavy_idx is the heavy atom bonded to the proton, and atoms can be retrieved from the universe:: atom = u.atoms[idx] .. versionadded:: 0.20.0
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
_get_network
dtklinh/GBRDE
2
python
def _get_network(self): 'Network representation of the water network.\n\n The output is generated per frame as is explained in :ref:`wb_Analysis_Network`.\n Each hydrogen bond has a compact representation of ::\n\n [sele1_acceptor_idx, None, sele2_donor_idx, donor_heavy_idx, distance, angle]\n\n or ::\n\n [sele1_donor_idx, donor_heavy_idx, sele1_acceptor_idx, None, distance, angle]\n\n The donor_heavy_idx is the heavy atom bonding to the proton and atoms\n can be retrived from the universe::\n\n atom = u.atoms[idx]\n\n .. versionadded:: 0.20.0\n\n ' return self._network
def _get_network(self): 'Network representation of the water network.\n\n The output is generated per frame as is explained in :ref:`wb_Analysis_Network`.\n Each hydrogen bond has a compact representation of ::\n\n [sele1_acceptor_idx, None, sele2_donor_idx, donor_heavy_idx, distance, angle]\n\n or ::\n\n [sele1_donor_idx, donor_heavy_idx, sele1_acceptor_idx, None, distance, angle]\n\n The donor_heavy_idx is the heavy atom bonding to the proton and atoms\n can be retrived from the universe::\n\n atom = u.atoms[idx]\n\n .. versionadded:: 0.20.0\n\n ' return self._network<|docstring|>Network representation of the water network. The output is generated per frame as is explained in :ref:`wb_Analysis_Network`. Each hydrogen bond has a compact representation of :: [sele1_acceptor_idx, None, sele2_donor_idx, donor_heavy_idx, distance, angle] or :: [sele1_donor_idx, donor_heavy_idx, sele1_acceptor_idx, None, distance, angle] The donor_heavy_idx is the heavy atom bonding to the proton and atoms can be retrived from the universe:: atom = u.atoms[idx] .. versionadded:: 0.20.0<|endoftext|>
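`_network` holds one nested dictionary per frame, with the compact hydrogen-bond tuples as keys and ``None`` marking arrival at selection 2, so it can be walked directly. A small sketch that prints the bridges of the first analysed frame; it uses the private attribute for illustration and assumes the analysis has been run::

    def walk(graph, depth=0):
        if graph is None:
            return
        for hbond in graph:                        # (idx1, heavy1, idx2, heavy2, dist, angle)
            print(' ' * depth, hbond)
            walk(graph[hbond], depth + 2)

    walk(w._network[0])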
7c6160609748b2e3c30dd14f64e6540ef294b67fee40710bb5e9eea104a0d539
@classmethod def _full_link(self, output, node): '\n A function used in _traverse_water_network to add the new hydrogen bond to the existing bonds.\n :param output: The existing hydrogen bonds from selection 1\n :param node: The new hydrogen bond\n :return: The hydrogen bonds from selection 1 with the new hydrogen bond added\n ' result = output[:] result.append(node) return result
A function used in _traverse_water_network to add the new hydrogen bond to the existing bonds. :param output: The existing hydrogen bonds from selection 1 :param node: The new hydrogen bond :return: The hydrogen bonds from selection 1 with the new hydrogen bond added
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
_full_link
dtklinh/GBRDE
2
python
@classmethod def _full_link(self, output, node): '\n A function used in _traverse_water_network to add the new hydrogen bond to the existing bonds.\n :param output: The existing hydrogen bonds from selection 1\n :param node: The new hydrogen bond\n :return: The hydrogen bonds from selection 1 with the new hydrogen bond added\n ' result = output[:] result.append(node) return result
@classmethod def _full_link(self, output, node): '\n A function used in _traverse_water_network to add the new hydrogen bond to the existing bonds.\n :param output: The existing hydrogen bonds from selection 1\n :param node: The new hydrogen bond\n :return: The hydrogen bonds from selection 1 with the new hydrogen bond added\n ' result = output[:] result.append(node) return result<|docstring|>A function used in _traverse_water_network to add the new hydrogen bond to the existing bonds. :param output: The existing hydrogen bonds from selection 1 :param node: The new hydrogen bond :return: The hydrogen bonds from selection 1 with the new hydrogen bond added<|endoftext|>
d44b0627bc99657b0e450f102ea5344224c40b2b254a77e4408ca8dd5974c600
@classmethod def _compact_link(self, output, node): '\n A function used in _traverse_water_network to add the new hydrogen bond to the existing bonds.\n In this form no new list is created and thus, one bridge will only appear once.\n :param output: The existing hydrogen bonds from selection 1\n :param node: The new hydrogen bond\n :return: The hydrogen bonds from selection 1 with the new hydrogen bond added\n ' output.append(node) return output
A function used in _traverse_water_network to add the new hydrogen bond to the existing bonds. In this form no new list is created and thus, one bridge will only appear once. :param output: The existing hydrogen bonds from selection 1 :param node: The new hydrogen bond :return: The hydrogen bonds from selection 1 with the new hydrogen bond added
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
_compact_link
dtklinh/GBRDE
2
python
@classmethod def _compact_link(self, output, node): '\n A function used in _traverse_water_network to add the new hydrogen bond to the existing bonds.\n In this form no new list is created and thus, one bridge will only appear once.\n :param output: The existing hydrogen bonds from selection 1\n :param node: The new hydrogen bond\n :return: The hydrogen bonds from selection 1 with the new hydrogen bond added\n ' output.append(node) return output
@classmethod def _compact_link(self, output, node): '\n A function used in _traverse_water_network to add the new hydrogen bond to the existing bonds.\n In this form no new list is created and thus, one bridge will only appear once.\n :param output: The existing hydrogen bonds from selection 1\n :param node: The new hydrogen bond\n :return: The hydrogen bonds from selection 1 with the new hydrogen bond added\n ' output.append(node) return output<|docstring|>A function used in _traverse_water_network to add the new hydrogen bond to the existing bonds. In this form no new list is created and thus, one bridge will only appear once. :param output: The existing hydrogen bonds from selection 1 :param node: The new hydrogen bond :return: The hydrogen bonds from selection 1 with the new hydrogen bond added<|endoftext|>
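The difference between the two link helpers is aliasing: `_full_link` copies the running chain so every branch of the traversal keeps its own list, while `_compact_link` appends to the shared list in place so each bond is recorded only once. A tiny standalone illustration::

    def full_link(output, node):
        result = output[:]                         # copy, then extend
        result.append(node)
        return result

    def compact_link(output, node):
        output.append(node)                        # extend the shared list in place
        return output

    chain = ['a']
    b1 = full_link(chain, 'b')
    b2 = full_link(chain, 'c')
    # chain == ['a'], b1 == ['a', 'b'], b2 == ['a', 'c']

    shared = ['a']
    compact_link(shared, 'b')
    compact_link(shared, 'c')
    # shared == ['a', 'b', 'c']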
29b5cae0b7c1824e494df9cb3aa9b3261fac4f1eda6e13a2e0a2e919c8a18f39
def _count_by_type_analysis(self, current, output, *args, **kwargs): '\n Generates the key for count_by_type analysis.\n :return:\n ' (s1_index, to_index, (s1_resname, s1_resid, s1_name), (to_resname, to_resid, to_name), dist, angle) = self._expand_timeseries(current[0]) (from_index, s2_index, (from_resname, from_resid, from_name), (s2_resname, s2_resid, s2_name), dist, angle) = self._expand_timeseries(current[(- 1)]) key = (s1_index, s2_index, s1_resname, s1_resid, s1_name, s2_resname, s2_resid, s2_name) output[key] += 1
Generates the key for count_by_type analysis. :return:
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
_count_by_type_analysis
dtklinh/GBRDE
2
python
def _count_by_type_analysis(self, current, output, *args, **kwargs): '\n Generates the key for count_by_type analysis.\n :return:\n ' (s1_index, to_index, (s1_resname, s1_resid, s1_name), (to_resname, to_resid, to_name), dist, angle) = self._expand_timeseries(current[0]) (from_index, s2_index, (from_resname, from_resid, from_name), (s2_resname, s2_resid, s2_name), dist, angle) = self._expand_timeseries(current[(- 1)]) key = (s1_index, s2_index, s1_resname, s1_resid, s1_name, s2_resname, s2_resid, s2_name) output[key] += 1
def _count_by_type_analysis(self, current, output, *args, **kwargs): '\n Generates the key for count_by_type analysis.\n :return:\n ' (s1_index, to_index, (s1_resname, s1_resid, s1_name), (to_resname, to_resid, to_name), dist, angle) = self._expand_timeseries(current[0]) (from_index, s2_index, (from_resname, from_resid, from_name), (s2_resname, s2_resid, s2_name), dist, angle) = self._expand_timeseries(current[(- 1)]) key = (s1_index, s2_index, s1_resname, s1_resid, s1_name, s2_resname, s2_resid, s2_name) output[key] += 1<|docstring|>Generates the key for count_by_type analysis. :return:<|endoftext|>
24cc9645a52aefe097c02745fb50fed28518b7085b7f31e038882ca1702c97ca
def count_by_type(self, analysis_func=None, **kwargs): 'Counts the frequency of water bridge of a specific type.\n\n If one atom *A* from *selection 1* is linked to atom *B* from\n *selection 2* through one or more bridging waters, an entity will be created and\n the proportion of time that this linkage exists in the whole simulation\n will be calculated.\n\n The identification of a specific type of water bridge can be modified by\n supplying the analysis_func function. See :ref:`wb_count_by_type`\n for detail.\n\n Returns\n -------\n counts : list\n Returns a :class:`list` containing atom indices for *A* and\n *B*, residue names, residue numbers, atom names (for both A and B) and\n the fraction of the total time during which the water bridge was\n detected. This method returns None if method\n :meth:`WaterBridgeAnalysis.run` was not executed first.\n\n\n ' output = None if (analysis_func is None): analysis_func = self._count_by_type_analysis output = 'combined' if self._network: length = len(self._network) result_dict = defaultdict(int) for frame in self._network: frame_dict = defaultdict(int) self._traverse_water_network(frame, [], analysis_func=analysis_func, output=frame_dict, link_func=self._full_link, **kwargs) for (key, value) in frame_dict.items(): result_dict[key] += frame_dict[key] if (output == 'combined'): result = [[i for i in key] for key in result_dict] [result[i].append((result_dict[key] / length)) for (i, key) in enumerate(result_dict)] else: result = [(key, (result_dict[key] / length)) for key in result_dict] return result else: return None
Counts the frequency of water bridges of a specific type. If one atom *A* from *selection 1* is linked to atom *B* from *selection 2* through one or more bridging waters, an entity will be created and the proportion of time that this linkage exists in the whole simulation will be calculated. The identification of a specific type of water bridge can be modified by supplying the analysis_func function. See :ref:`wb_count_by_type` for details. Returns ------- counts : list Returns a :class:`list` containing atom indices for *A* and *B*, residue names, residue numbers, atom names (for both A and B) and the fraction of the total time during which the water bridge was detected. This method returns None if the method :meth:`WaterBridgeAnalysis.run` was not executed first.
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
count_by_type
dtklinh/GBRDE
2
python
def count_by_type(self, analysis_func=None, **kwargs): 'Counts the frequency of water bridge of a specific type.\n\n If one atom *A* from *selection 1* is linked to atom *B* from\n *selection 2* through one or more bridging waters, an entity will be created and\n the proportion of time that this linkage exists in the whole simulation\n will be calculated.\n\n The identification of a specific type of water bridge can be modified by\n supplying the analysis_func function. See :ref:`wb_count_by_type`\n for detail.\n\n Returns\n -------\n counts : list\n Returns a :class:`list` containing atom indices for *A* and\n *B*, residue names, residue numbers, atom names (for both A and B) and\n the fraction of the total time during which the water bridge was\n detected. This method returns None if method\n :meth:`WaterBridgeAnalysis.run` was not executed first.\n\n\n ' output = None if (analysis_func is None): analysis_func = self._count_by_type_analysis output = 'combined' if self._network: length = len(self._network) result_dict = defaultdict(int) for frame in self._network: frame_dict = defaultdict(int) self._traverse_water_network(frame, [], analysis_func=analysis_func, output=frame_dict, link_func=self._full_link, **kwargs) for (key, value) in frame_dict.items(): result_dict[key] += frame_dict[key] if (output == 'combined'): result = [[i for i in key] for key in result_dict] [result[i].append((result_dict[key] / length)) for (i, key) in enumerate(result_dict)] else: result = [(key, (result_dict[key] / length)) for key in result_dict] return result else: return None
def count_by_type(self, analysis_func=None, **kwargs): 'Counts the frequency of water bridge of a specific type.\n\n If one atom *A* from *selection 1* is linked to atom *B* from\n *selection 2* through one or more bridging waters, an entity will be created and\n the proportion of time that this linkage exists in the whole simulation\n will be calculated.\n\n The identification of a specific type of water bridge can be modified by\n supplying the analysis_func function. See :ref:`wb_count_by_type`\n for detail.\n\n Returns\n -------\n counts : list\n Returns a :class:`list` containing atom indices for *A* and\n *B*, residue names, residue numbers, atom names (for both A and B) and\n the fraction of the total time during which the water bridge was\n detected. This method returns None if method\n :meth:`WaterBridgeAnalysis.run` was not executed first.\n\n\n ' output = None if (analysis_func is None): analysis_func = self._count_by_type_analysis output = 'combined' if self._network: length = len(self._network) result_dict = defaultdict(int) for frame in self._network: frame_dict = defaultdict(int) self._traverse_water_network(frame, [], analysis_func=analysis_func, output=frame_dict, link_func=self._full_link, **kwargs) for (key, value) in frame_dict.items(): result_dict[key] += frame_dict[key] if (output == 'combined'): result = [[i for i in key] for key in result_dict] [result[i].append((result_dict[key] / length)) for (i, key) in enumerate(result_dict)] else: result = [(key, (result_dict[key] / length)) for key in result_dict] return result else: return None<|docstring|>Counts the frequency of water bridge of a specific type. If one atom *A* from *selection 1* is linked to atom *B* from *selection 2* through one or more bridging waters, an entity will be created and the proportion of time that this linkage exists in the whole simulation will be calculated. The identification of a specific type of water bridge can be modified by supplying the analysis_func function. See :ref:`wb_count_by_type` for detail. Returns ------- counts : list Returns a :class:`list` containing atom indices for *A* and *B*, residue names, residue numbers, atom names (for both A and B) and the fraction of the total time during which the water bridge was detected. This method returns None if method :meth:`WaterBridgeAnalysis.run` was not executed first.<|endoftext|>
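A usage sketch for `count_by_type`, reading the trailing number as the fraction of analysed frames in which that bridge was present (continuing with the `w` object from the constructor sketch, after `w.run()`)::

    for entry in w.count_by_type():
        # entry = [s1_index, s2_index, s1_resname, s1_resid, s1_name,
        #          s2_resname, s2_resid, s2_name, fraction_of_frames]
        *identity, fraction = entry
        if fraction > 0.5:                         # present in more than half of the frames
            print(identity, round(fraction, 2))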
c81afcf0a4639c8a491150bb5d19dd2d91d64c6dbbd00c72b8026ea6e3bf7b03
def count_by_time(self, analysis_func=None, **kwargs): 'Counts the number of water bridges per timestep.\n\n The counting behaviour can be adjusted by supplying analysis_func.\n See :ref:`wb_count_by_time` for details.\n\n Returns\n -------\n counts : list\n Returns a time series ``N(t)`` where ``N`` is the total\n number of observed water bridges at time ``t``.\n\n ' if (analysis_func is None): analysis_func = self._count_by_time_analysis if self._network: result = [] for (time, frame) in zip(self.timesteps, self._network): result_dict = defaultdict(int) self._traverse_water_network(frame, [], analysis_func=analysis_func, output=result_dict, link_func=self._full_link, **kwargs) result.append((time, sum([result_dict[key] for key in result_dict]))) return result else: return None
Counts the number of water bridges per timestep. The counting behaviour can be adjusted by supplying analysis_func. See :ref:`wb_count_by_time` for details. Returns ------- counts : list Returns a time series ``N(t)`` where ``N`` is the total number of observed water bridges at time ``t``.
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
count_by_time
dtklinh/GBRDE
2
python
def count_by_time(self, analysis_func=None, **kwargs): 'Counts the number of water bridges per timestep.\n\n The counting behaviour can be adjusted by supplying analysis_func.\n See :ref:`wb_count_by_time` for details.\n\n Returns\n -------\n counts : list\n Returns a time series ``N(t)`` where ``N`` is the total\n number of observed water bridges at time ``t``.\n\n ' if (analysis_func is None): analysis_func = self._count_by_time_analysis if self._network: result = [] for (time, frame) in zip(self.timesteps, self._network): result_dict = defaultdict(int) self._traverse_water_network(frame, [], analysis_func=analysis_func, output=result_dict, link_func=self._full_link, **kwargs) result.append((time, sum([result_dict[key] for key in result_dict]))) return result else: return None
def count_by_time(self, analysis_func=None, **kwargs): 'Counts the number of water bridges per timestep.\n\n The counting behaviour can be adjusted by supplying analysis_func.\n See :ref:`wb_count_by_time` for details.\n\n Returns\n -------\n counts : list\n Returns a time series ``N(t)`` where ``N`` is the total\n number of observed water bridges at time ``t``.\n\n ' if (analysis_func is None): analysis_func = self._count_by_time_analysis if self._network: result = [] for (time, frame) in zip(self.timesteps, self._network): result_dict = defaultdict(int) self._traverse_water_network(frame, [], analysis_func=analysis_func, output=result_dict, link_func=self._full_link, **kwargs) result.append((time, sum([result_dict[key] for key in result_dict]))) return result else: return None<|docstring|>Counts the number of water bridges per timestep. The counting behaviour can be adjusted by supplying analysis_func. See :ref:`wb_count_by_time` for details. Returns ------- counts : list Returns a time series ``N(t)`` where ``N`` is the total number of observed water bridges at time ``t``.<|endoftext|>
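`count_by_time` returns one ``(time, count)`` pair per analysed frame. A short sketch that averages the series; NumPy is assumed to be available, as it is a core MDAnalysis dependency::

    import numpy as np

    counts = w.count_by_time()                     # [(t0, N0), (t1, N1), ...]
    times, n_bridges = np.array(counts).T
    print('frames from t =', times[0], 'to t =', times[-1])
    print('mean number of water bridges:', n_bridges.mean())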
547c2944cf4fa09f3aa912e7bb71ccbe33bbd22c86684c04a61d789a46a8ffdd
def timesteps_by_type(self, analysis_func=None, **kwargs): 'Frames during which each water bridges existed, sorted by each water bridges.\n\n Processes :attr:`WaterBridgeAnalysis._network` and returns a\n :class:`list` containing atom indices, residue names, residue\n numbers (from selection 1 and selection 2) and each timestep at which the\n water bridge was detected.\n\n Similar to :meth:`~WaterBridgeAnalysis.count_by_type` and\n :meth:`~WaterBridgeAnalysis.count_by_time`, the behavior can be adjusted by\n supplying an analysis_func.\n\n Returns\n -------\n data : list\n\n ' output = None if (analysis_func is None): analysis_func = self._timesteps_by_type_analysis output = 'combined' if self._network: result = defaultdict(list) if (self.timesteps is None): timesteps = range(len(self._network)) else: timesteps = self.timesteps for (time, frame) in zip(timesteps, self._network): self._traverse_water_network(frame, [], analysis_func=analysis_func, output=result, link_func=self._full_link, time=time, **kwargs) result_list = [] for (key, time_list) in six.iteritems(result): for time in time_list: if (output == 'combined'): key = list(key) key.append(time) result_list.append(key) else: result_list.append((key, time)) return result_list else: return None
Frames during which each water bridges existed, sorted by each water bridges. Processes :attr:`WaterBridgeAnalysis._network` and returns a :class:`list` containing atom indices, residue names, residue numbers (from selection 1 and selection 2) and each timestep at which the water bridge was detected. Similar to :meth:`~WaterBridgeAnalysis.count_by_type` and :meth:`~WaterBridgeAnalysis.count_by_time`, the behavior can be adjusted by supplying an analysis_func. Returns ------- data : list
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
timesteps_by_type
dtklinh/GBRDE
2
python
def timesteps_by_type(self, analysis_func=None, **kwargs): 'Frames during which each water bridges existed, sorted by each water bridges.\n\n Processes :attr:`WaterBridgeAnalysis._network` and returns a\n :class:`list` containing atom indices, residue names, residue\n numbers (from selection 1 and selection 2) and each timestep at which the\n water bridge was detected.\n\n Similar to :meth:`~WaterBridgeAnalysis.count_by_type` and\n :meth:`~WaterBridgeAnalysis.count_by_time`, the behavior can be adjusted by\n supplying an analysis_func.\n\n Returns\n -------\n data : list\n\n ' output = None if (analysis_func is None): analysis_func = self._timesteps_by_type_analysis output = 'combined' if self._network: result = defaultdict(list) if (self.timesteps is None): timesteps = range(len(self._network)) else: timesteps = self.timesteps for (time, frame) in zip(timesteps, self._network): self._traverse_water_network(frame, [], analysis_func=analysis_func, output=result, link_func=self._full_link, time=time, **kwargs) result_list = [] for (key, time_list) in six.iteritems(result): for time in time_list: if (output == 'combined'): key = list(key) key.append(time) result_list.append(key) else: result_list.append((key, time)) return result_list else: return None
def timesteps_by_type(self, analysis_func=None, **kwargs): 'Frames during which each water bridges existed, sorted by each water bridges.\n\n Processes :attr:`WaterBridgeAnalysis._network` and returns a\n :class:`list` containing atom indices, residue names, residue\n numbers (from selection 1 and selection 2) and each timestep at which the\n water bridge was detected.\n\n Similar to :meth:`~WaterBridgeAnalysis.count_by_type` and\n :meth:`~WaterBridgeAnalysis.count_by_time`, the behavior can be adjusted by\n supplying an analysis_func.\n\n Returns\n -------\n data : list\n\n ' output = None if (analysis_func is None): analysis_func = self._timesteps_by_type_analysis output = 'combined' if self._network: result = defaultdict(list) if (self.timesteps is None): timesteps = range(len(self._network)) else: timesteps = self.timesteps for (time, frame) in zip(timesteps, self._network): self._traverse_water_network(frame, [], analysis_func=analysis_func, output=result, link_func=self._full_link, time=time, **kwargs) result_list = [] for (key, time_list) in six.iteritems(result): for time in time_list: if (output == 'combined'): key = list(key) key.append(time) result_list.append(key) else: result_list.append((key, time)) return result_list else: return None<|docstring|>Frames during which each water bridges existed, sorted by each water bridges. Processes :attr:`WaterBridgeAnalysis._network` and returns a :class:`list` containing atom indices, residue names, residue numbers (from selection 1 and selection 2) and each timestep at which the water bridge was detected. Similar to :meth:`~WaterBridgeAnalysis.count_by_type` and :meth:`~WaterBridgeAnalysis.count_by_time`, the behavior can be adjusted by supplying an analysis_func. Returns ------- data : list<|endoftext|>
5e54e5b030094cf4af646be94e40bea7550db2c4698c60b46a286d8d76ec74dd
def generate_table(self, output_format=None): 'Generate a normalised table of the results.\n\n The table is stored as a :class:`numpy.recarray` in the\n attribute :attr:`~WaterBridgeAnalysis.table`.\n\n The output format of :attr:`~WaterBridgeAnalysis.table` can also be\n changed using output_format in a fashion similar to :attr:`WaterBridgeAnalysis.timeseries`\n ' output_format = (output_format or self.output_format) if (self._network == []): msg = 'No data computed, do run() first.' warnings.warn(msg, category=MissingDataWarning) logger.warning(msg) return None timeseries = self._generate_timeseries(output_format) num_records = np.sum([len(hframe) for hframe in timeseries]) if (output_format == 'sele1_sele2'): dtype = [('time', float), ('sele1_index', int), ('sele2_index', int), ('sele1_resnm', '|U4'), ('sele1_resid', int), ('sele1_atom', '|U4'), ('sele2_resnm', '|U4'), ('sele2_resid', int), ('sele2_atom', '|U4'), ('distance', float), ('angle', float)] elif (output_format == 'donor_acceptor'): dtype = [('time', float), ('donor_index', int), ('acceptor_index', int), ('donor_resnm', '|U4'), ('donor_resid', int), ('donor_atom', '|U4'), ('acceptor_resnm', '|U4'), ('acceptor_resid', int), ('acceptor_atom', '|U4'), ('distance', float), ('angle', float)] out = np.empty((num_records,), dtype=dtype) cursor = 0 for (t, hframe) in zip(self.timesteps, timeseries): for (donor_index, acceptor_index, donor, acceptor, distance, angle) in hframe: out[cursor] = ((((t, donor_index, acceptor_index) + donor) + acceptor) + (distance, angle)) cursor += 1 assert (cursor == num_records), 'Internal Error: Not all wb records stored' table = out.view(np.recarray) logger.debug('WBridge: Stored results as table with %(num_records)d entries.', vars()) self.table = table
Generate a normalised table of the results. The table is stored as a :class:`numpy.recarray` in the attribute :attr:`~WaterBridgeAnalysis.table`. The output format of :attr:`~WaterBridgeAnalysis.table` can also be changed using output_format in a fashion similar to :attr:`WaterBridgeAnalysis.timeseries`
venv/lib/python3.7/site-packages/MDAnalysis/analysis/hbonds/wbridge_analysis.py
generate_table
dtklinh/GBRDE
2
python
def generate_table(self, output_format=None): 'Generate a normalised table of the results.\n\n The table is stored as a :class:`numpy.recarray` in the\n attribute :attr:`~WaterBridgeAnalysis.table`.\n\n The output format of :attr:`~WaterBridgeAnalysis.table` can also be\n changed using output_format in a fashion similar to :attr:`WaterBridgeAnalysis.timeseries`\n ' output_format = (output_format or self.output_format) if (self._network == []): msg = 'No data computed, do run() first.' warnings.warn(msg, category=MissingDataWarning) logger.warning(msg) return None timeseries = self._generate_timeseries(output_format) num_records = np.sum([len(hframe) for hframe in timeseries]) if (output_format == 'sele1_sele2'): dtype = [('time', float), ('sele1_index', int), ('sele2_index', int), ('sele1_resnm', '|U4'), ('sele1_resid', int), ('sele1_atom', '|U4'), ('sele2_resnm', '|U4'), ('sele2_resid', int), ('sele2_atom', '|U4'), ('distance', float), ('angle', float)] elif (output_format == 'donor_acceptor'): dtype = [('time', float), ('donor_index', int), ('acceptor_index', int), ('donor_resnm', '|U4'), ('donor_resid', int), ('donor_atom', '|U4'), ('acceptor_resnm', '|U4'), ('acceptor_resid', int), ('acceptor_atom', '|U4'), ('distance', float), ('angle', float)] out = np.empty((num_records,), dtype=dtype) cursor = 0 for (t, hframe) in zip(self.timesteps, timeseries): for (donor_index, acceptor_index, donor, acceptor, distance, angle) in hframe: out[cursor] = ((((t, donor_index, acceptor_index) + donor) + acceptor) + (distance, angle)) cursor += 1 assert (cursor == num_records), 'Internal Error: Not all wb records stored' table = out.view(np.recarray) logger.debug('WBridge: Stored results as table with %(num_records)d entries.', vars()) self.table = table
def generate_table(self, output_format=None): 'Generate a normalised table of the results.\n\n The table is stored as a :class:`numpy.recarray` in the\n attribute :attr:`~WaterBridgeAnalysis.table`.\n\n The output format of :attr:`~WaterBridgeAnalysis.table` can also be\n changed using output_format in a fashion similar to :attr:`WaterBridgeAnalysis.timeseries`\n ' output_format = (output_format or self.output_format) if (self._network == []): msg = 'No data computed, do run() first.' warnings.warn(msg, category=MissingDataWarning) logger.warning(msg) return None timeseries = self._generate_timeseries(output_format) num_records = np.sum([len(hframe) for hframe in timeseries]) if (output_format == 'sele1_sele2'): dtype = [('time', float), ('sele1_index', int), ('sele2_index', int), ('sele1_resnm', '|U4'), ('sele1_resid', int), ('sele1_atom', '|U4'), ('sele2_resnm', '|U4'), ('sele2_resid', int), ('sele2_atom', '|U4'), ('distance', float), ('angle', float)] elif (output_format == 'donor_acceptor'): dtype = [('time', float), ('donor_index', int), ('acceptor_index', int), ('donor_resnm', '|U4'), ('donor_resid', int), ('donor_atom', '|U4'), ('acceptor_resnm', '|U4'), ('acceptor_resid', int), ('acceptor_atom', '|U4'), ('distance', float), ('angle', float)] out = np.empty((num_records,), dtype=dtype) cursor = 0 for (t, hframe) in zip(self.timesteps, timeseries): for (donor_index, acceptor_index, donor, acceptor, distance, angle) in hframe: out[cursor] = ((((t, donor_index, acceptor_index) + donor) + acceptor) + (distance, angle)) cursor += 1 assert (cursor == num_records), 'Internal Error: Not all wb records stored' table = out.view(np.recarray) logger.debug('WBridge: Stored results as table with %(num_records)d entries.', vars()) self.table = table<|docstring|>Generate a normalised table of the results. The table is stored as a :class:`numpy.recarray` in the attribute :attr:`~WaterBridgeAnalysis.table`. The output format of :attr:`~WaterBridgeAnalysis.table` can also be changed using output_format in a fashion similar to :attr:`WaterBridgeAnalysis.timeseries`<|endoftext|>
72c050dc5ed0a84dfdd3cd98ab5c117448cb8fc5c8f8070e1c698fb5da1aa3a1
@register.tag('block') def do_block(parser, token): '\n Define a block that can be overridden by child templates.\n ' bits = token.contents.split() if (len(bits) != 2): raise TemplateSyntaxError(("'%s' tag takes only one argument" % bits[0])) block_name = bits[1] try: if (block_name in parser.__loaded_blocks): raise TemplateSyntaxError(("'%s' tag with name '%s' appears more than once" % (bits[0], block_name))) parser.__loaded_blocks.append(block_name) except AttributeError: parser.__loaded_blocks = [block_name] nodelist = parser.parse(('endblock',)) endblock = parser.next_token() acceptable_endblocks = ('endblock', ('endblock %s' % block_name)) if (endblock.contents not in acceptable_endblocks): parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks) return BlockNode(block_name, nodelist)
Define a block that can be overridden by child templates.
django/template/loader_tags.py
do_block
mavisguan/django
61,676
python
@register.tag('block') def do_block(parser, token): '\n \n ' bits = token.contents.split() if (len(bits) != 2): raise TemplateSyntaxError(("'%s' tag takes only one argument" % bits[0])) block_name = bits[1] try: if (block_name in parser.__loaded_blocks): raise TemplateSyntaxError(("'%s' tag with name '%s' appears more than once" % (bits[0], block_name))) parser.__loaded_blocks.append(block_name) except AttributeError: parser.__loaded_blocks = [block_name] nodelist = parser.parse(('endblock',)) endblock = parser.next_token() acceptable_endblocks = ('endblock', ('endblock %s' % block_name)) if (endblock.contents not in acceptable_endblocks): parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks) return BlockNode(block_name, nodelist)
@register.tag('block') def do_block(parser, token): '\n \n ' bits = token.contents.split() if (len(bits) != 2): raise TemplateSyntaxError(("'%s' tag takes only one argument" % bits[0])) block_name = bits[1] try: if (block_name in parser.__loaded_blocks): raise TemplateSyntaxError(("'%s' tag with name '%s' appears more than once" % (bits[0], block_name))) parser.__loaded_blocks.append(block_name) except AttributeError: parser.__loaded_blocks = [block_name] nodelist = parser.parse(('endblock',)) endblock = parser.next_token() acceptable_endblocks = ('endblock', ('endblock %s' % block_name)) if (endblock.contents not in acceptable_endblocks): parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks) return BlockNode(block_name, nodelist)<|docstring|>Define a block that can be overridden by child templates.<|endoftext|>
fced9cc6bfb770caf0a06adb05253c8e173d17a6627e9892252ad7644c06a57f
def construct_relative_path(current_template_name, relative_name): "\n Convert a relative path (starting with './' or '../') to the full template\n name based on the current_template_name.\n " new_name = relative_name.strip('\'"') if (not new_name.startswith(('./', '../'))): return relative_name new_name = posixpath.normpath(posixpath.join(posixpath.dirname(current_template_name.lstrip('/')), new_name)) if new_name.startswith('../'): raise TemplateSyntaxError(("The relative path '%s' points outside the file hierarchy that template '%s' is in." % (relative_name, current_template_name))) if (current_template_name.lstrip('/') == new_name): raise TemplateSyntaxError(("The relative path '%s' was translated to template name '%s', the same template in which the tag appears." % (relative_name, current_template_name))) has_quotes = (relative_name.startswith(('"', "'")) and (relative_name[0] == relative_name[(- 1)])) return (f'"{new_name}"' if has_quotes else new_name)
Convert a relative path (starting with './' or '../') to the full template name based on the current_template_name.
django/template/loader_tags.py
construct_relative_path
mavisguan/django
61,676
python
def construct_relative_path(current_template_name, relative_name): "\n Convert a relative path (starting with './' or '../') to the full template\n name based on the current_template_name.\n " new_name = relative_name.strip('\'"') if (not new_name.startswith(('./', '../'))): return relative_name new_name = posixpath.normpath(posixpath.join(posixpath.dirname(current_template_name.lstrip('/')), new_name)) if new_name.startswith('../'): raise TemplateSyntaxError(("The relative path '%s' points outside the file hierarchy that template '%s' is in." % (relative_name, current_template_name))) if (current_template_name.lstrip('/') == new_name): raise TemplateSyntaxError(("The relative path '%s' was translated to template name '%s', the same template in which the tag appears." % (relative_name, current_template_name))) has_quotes = (relative_name.startswith(('"', "'")) and (relative_name[0] == relative_name[(- 1)])) return (f'"{new_name}"' if has_quotes else new_name)
def construct_relative_path(current_template_name, relative_name): "\n Convert a relative path (starting with './' or '../') to the full template\n name based on the current_template_name.\n " new_name = relative_name.strip('\'"') if (not new_name.startswith(('./', '../'))): return relative_name new_name = posixpath.normpath(posixpath.join(posixpath.dirname(current_template_name.lstrip('/')), new_name)) if new_name.startswith('../'): raise TemplateSyntaxError(("The relative path '%s' points outside the file hierarchy that template '%s' is in." % (relative_name, current_template_name))) if (current_template_name.lstrip('/') == new_name): raise TemplateSyntaxError(("The relative path '%s' was translated to template name '%s', the same template in which the tag appears." % (relative_name, current_template_name))) has_quotes = (relative_name.startswith(('"', "'")) and (relative_name[0] == relative_name[(- 1)])) return (f'"{new_name}"' if has_quotes else new_name)<|docstring|>Convert a relative path (starting with './' or '../') to the full template name based on the current_template_name.<|endoftext|>
311f53f109ae9daacbcdcbe19048814cb60a1bd6024d9a582cb2544129216a53
@register.tag('extends') def do_extends(parser, token): '\n Signal that this template extends a parent template.\n\n This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)\n uses the literal value "base" as the name of the parent template to extend,\n or ``{% extends variable %}`` uses the value of ``variable`` as either the\n name of the parent template to extend (if it evaluates to a string) or as\n the parent template itself (if it evaluates to a Template object).\n ' bits = token.split_contents() if (len(bits) != 2): raise TemplateSyntaxError(("'%s' takes one argument" % bits[0])) bits[1] = construct_relative_path(parser.origin.template_name, bits[1]) parent_name = parser.compile_filter(bits[1]) nodelist = parser.parse() if nodelist.get_nodes_by_type(ExtendsNode): raise TemplateSyntaxError(("'%s' cannot appear more than once in the same template" % bits[0])) return ExtendsNode(nodelist, parent_name)
Signal that this template extends a parent template. This tag may be used in two ways: ``{% extends "base" %}`` (with quotes) uses the literal value "base" as the name of the parent template to extend, or ``{% extends variable %}`` uses the value of ``variable`` as either the name of the parent template to extend (if it evaluates to a string) or as the parent template itself (if it evaluates to a Template object).
django/template/loader_tags.py
do_extends
mavisguan/django
61,676
python
@register.tag('extends') def do_extends(parser, token): '\n Signal that this template extends a parent template.\n\n This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)\n uses the literal value "base" as the name of the parent template to extend,\n or ``{% extends variable %}`` uses the value of ``variable`` as either the\n name of the parent template to extend (if it evaluates to a string) or as\n the parent template itself (if it evaluates to a Template object).\n ' bits = token.split_contents() if (len(bits) != 2): raise TemplateSyntaxError(("'%s' takes one argument" % bits[0])) bits[1] = construct_relative_path(parser.origin.template_name, bits[1]) parent_name = parser.compile_filter(bits[1]) nodelist = parser.parse() if nodelist.get_nodes_by_type(ExtendsNode): raise TemplateSyntaxError(("'%s' cannot appear more than once in the same template" % bits[0])) return ExtendsNode(nodelist, parent_name)
@register.tag('extends') def do_extends(parser, token): '\n Signal that this template extends a parent template.\n\n This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)\n uses the literal value "base" as the name of the parent template to extend,\n or ``{% extends variable %}`` uses the value of ``variable`` as either the\n name of the parent template to extend (if it evaluates to a string) or as\n the parent template itself (if it evaluates to a Template object).\n ' bits = token.split_contents() if (len(bits) != 2): raise TemplateSyntaxError(("'%s' takes one argument" % bits[0])) bits[1] = construct_relative_path(parser.origin.template_name, bits[1]) parent_name = parser.compile_filter(bits[1]) nodelist = parser.parse() if nodelist.get_nodes_by_type(ExtendsNode): raise TemplateSyntaxError(("'%s' cannot appear more than once in the same template" % bits[0])) return ExtendsNode(nodelist, parent_name)<|docstring|>Signal that this template extends a parent template. This tag may be used in two ways: ``{% extends "base" %}`` (with quotes) uses the literal value "base" as the name of the parent template to extend, or ``{% extends variable %}`` uses the value of ``variable`` as either the name of the parent template to extend (if it evaluates to a string) or as the parent template itself (if it evaluates to a Template object).<|endoftext|>
c86992f9212c27fc3206459883bacbbfa756aeca49eed245facddda34dbd674e
@register.tag('include') def do_include(parser, token): '\n Load a template and render it with the current context. You can pass\n additional context using keyword arguments.\n\n Example::\n\n {% include "foo/some_include" %}\n {% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}\n\n Use the ``only`` argument to exclude the current context when rendering\n the included template::\n\n {% include "foo/some_include" only %}\n {% include "foo/some_include" with bar="1" only %}\n ' bits = token.split_contents() if (len(bits) < 2): raise TemplateSyntaxError(('%r tag takes at least one argument: the name of the template to be included.' % bits[0])) options = {} remaining_bits = bits[2:] while remaining_bits: option = remaining_bits.pop(0) if (option in options): raise TemplateSyntaxError(('The %r option was specified more than once.' % option)) if (option == 'with'): value = token_kwargs(remaining_bits, parser, support_legacy=False) if (not value): raise TemplateSyntaxError(('"with" in %r tag needs at least one keyword argument.' % bits[0])) elif (option == 'only'): value = True else: raise TemplateSyntaxError(('Unknown argument for %r tag: %r.' % (bits[0], option))) options[option] = value isolated_context = options.get('only', False) namemap = options.get('with', {}) bits[1] = construct_relative_path(parser.origin.template_name, bits[1]) return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap, isolated_context=isolated_context)
Load a template and render it with the current context. You can pass additional context using keyword arguments. Example:: {% include "foo/some_include" %} {% include "foo/some_include" with bar="BAZZ!" baz="BING!" %} Use the ``only`` argument to exclude the current context when rendering the included template:: {% include "foo/some_include" only %} {% include "foo/some_include" with bar="1" only %}
django/template/loader_tags.py
do_include
mavisguan/django
61,676
python
@register.tag('include') def do_include(parser, token): '\n Load a template and render it with the current context. You can pass\n additional context using keyword arguments.\n\n Example::\n\n {% include "foo/some_include" %}\n {% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}\n\n Use the ``only`` argument to exclude the current context when rendering\n the included template::\n\n {% include "foo/some_include" only %}\n {% include "foo/some_include" with bar="1" only %}\n ' bits = token.split_contents() if (len(bits) < 2): raise TemplateSyntaxError(('%r tag takes at least one argument: the name of the template to be included.' % bits[0])) options = {} remaining_bits = bits[2:] while remaining_bits: option = remaining_bits.pop(0) if (option in options): raise TemplateSyntaxError(('The %r option was specified more than once.' % option)) if (option == 'with'): value = token_kwargs(remaining_bits, parser, support_legacy=False) if (not value): raise TemplateSyntaxError(('"with" in %r tag needs at least one keyword argument.' % bits[0])) elif (option == 'only'): value = True else: raise TemplateSyntaxError(('Unknown argument for %r tag: %r.' % (bits[0], option))) options[option] = value isolated_context = options.get('only', False) namemap = options.get('with', {}) bits[1] = construct_relative_path(parser.origin.template_name, bits[1]) return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap, isolated_context=isolated_context)
@register.tag('include') def do_include(parser, token): '\n Load a template and render it with the current context. You can pass\n additional context using keyword arguments.\n\n Example::\n\n {% include "foo/some_include" %}\n {% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}\n\n Use the ``only`` argument to exclude the current context when rendering\n the included template::\n\n {% include "foo/some_include" only %}\n {% include "foo/some_include" with bar="1" only %}\n ' bits = token.split_contents() if (len(bits) < 2): raise TemplateSyntaxError(('%r tag takes at least one argument: the name of the template to be included.' % bits[0])) options = {} remaining_bits = bits[2:] while remaining_bits: option = remaining_bits.pop(0) if (option in options): raise TemplateSyntaxError(('The %r option was specified more than once.' % option)) if (option == 'with'): value = token_kwargs(remaining_bits, parser, support_legacy=False) if (not value): raise TemplateSyntaxError(('"with" in %r tag needs at least one keyword argument.' % bits[0])) elif (option == 'only'): value = True else: raise TemplateSyntaxError(('Unknown argument for %r tag: %r.' % (bits[0], option))) options[option] = value isolated_context = options.get('only', False) namemap = options.get('with', {}) bits[1] = construct_relative_path(parser.origin.template_name, bits[1]) return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap, isolated_context=isolated_context)<|docstring|>Load a template and render it with the current context. You can pass additional context using keyword arguments. Example:: {% include "foo/some_include" %} {% include "foo/some_include" with bar="BAZZ!" baz="BING!" %} Use the ``only`` argument to exclude the current context when rendering the included template:: {% include "foo/some_include" only %} {% include "foo/some_include" with bar="1" only %}<|endoftext|>
fd232eb80a86b2ae756957fcd8aab143cda17c5c53c024cbbeec3e6e6da8bb1b
def find_template(self, template_name, context): '\n This is a wrapper around engine.find_template(). A history is kept in\n the render_context attribute between successive extends calls and\n passed as the skip argument. This enables extends to work recursively\n without extending the same template twice.\n ' history = context.render_context.setdefault(self.context_key, [self.origin]) (template, origin) = context.template.engine.find_template(template_name, skip=history) history.append(origin) return template
This is a wrapper around engine.find_template(). A history is kept in the render_context attribute between successive extends calls and passed as the skip argument. This enables extends to work recursively without extending the same template twice.
django/template/loader_tags.py
find_template
mavisguan/django
61,676
python
def find_template(self, template_name, context): '\n This is a wrapper around engine.find_template(). A history is kept in\n the render_context attribute between successive extends calls and\n passed as the skip argument. This enables extends to work recursively\n without extending the same template twice.\n ' history = context.render_context.setdefault(self.context_key, [self.origin]) (template, origin) = context.template.engine.find_template(template_name, skip=history) history.append(origin) return template
def find_template(self, template_name, context): '\n This is a wrapper around engine.find_template(). A history is kept in\n the render_context attribute between successive extends calls and\n passed as the skip argument. This enables extends to work recursively\n without extending the same template twice.\n ' history = context.render_context.setdefault(self.context_key, [self.origin]) (template, origin) = context.template.engine.find_template(template_name, skip=history) history.append(origin) return template<|docstring|>This is a wrapper around engine.find_template(). A history is kept in the render_context attribute between successive extends calls and passed as the skip argument. This enables extends to work recursively without extending the same template twice.<|endoftext|>
de11a21ce79f52525d01a93235aa6b667c1ffe1f9ceb68bb42cea676d7df67e4
def render(self, context): '\n Render the specified template and context. Cache the template object\n in render_context to avoid reparsing and loading when used in a for\n loop.\n ' template = self.template.resolve(context) if (not callable(getattr(template, 'render', None))): template_name = (template or ()) if isinstance(template_name, str): template_name = (construct_relative_path(self.origin.template_name, template_name),) else: template_name = tuple(template_name) cache = context.render_context.dicts[0].setdefault(self, {}) template = cache.get(template_name) if (template is None): template = context.template.engine.select_template(template_name) cache[template_name] = template elif hasattr(template, 'template'): template = template.template values = {name: var.resolve(context) for (name, var) in self.extra_context.items()} if self.isolated_context: return template.render(context.new(values)) with context.push(**values): return template.render(context)
Render the specified template and context. Cache the template object in render_context to avoid reparsing and loading when used in a for loop.
django/template/loader_tags.py
render
mavisguan/django
61,676
python
def render(self, context): '\n Render the specified template and context. Cache the template object\n in render_context to avoid reparsing and loading when used in a for\n loop.\n ' template = self.template.resolve(context) if (not callable(getattr(template, 'render', None))): template_name = (template or ()) if isinstance(template_name, str): template_name = (construct_relative_path(self.origin.template_name, template_name),) else: template_name = tuple(template_name) cache = context.render_context.dicts[0].setdefault(self, {}) template = cache.get(template_name) if (template is None): template = context.template.engine.select_template(template_name) cache[template_name] = template elif hasattr(template, 'template'): template = template.template values = {name: var.resolve(context) for (name, var) in self.extra_context.items()} if self.isolated_context: return template.render(context.new(values)) with context.push(**values): return template.render(context)
def render(self, context): '\n Render the specified template and context. Cache the template object\n in render_context to avoid reparsing and loading when used in a for\n loop.\n ' template = self.template.resolve(context) if (not callable(getattr(template, 'render', None))): template_name = (template or ()) if isinstance(template_name, str): template_name = (construct_relative_path(self.origin.template_name, template_name),) else: template_name = tuple(template_name) cache = context.render_context.dicts[0].setdefault(self, {}) template = cache.get(template_name) if (template is None): template = context.template.engine.select_template(template_name) cache[template_name] = template elif hasattr(template, 'template'): template = template.template values = {name: var.resolve(context) for (name, var) in self.extra_context.items()} if self.isolated_context: return template.render(context.new(values)) with context.push(**values): return template.render(context)<|docstring|>Render the specified template and context. Cache the template object in render_context to avoid reparsing and loading when used in a for loop.<|endoftext|>
c16f6a9858e9010100485c70d5be2024efd6df9e3ccacb5ea4b97d1c5bc25956
def migrate_run_partition(instance, print_fn=None): '\n Utility method to build an asset key index from the data in existing event log records.\n Takes in event_log_storage, and a print_fn to keep track of progress.\n ' if print_fn: print_fn('Querying run storage.') runs = instance.get_runs() if print_fn: runs = tqdm(runs) for run in runs: if (PARTITION_NAME_TAG not in run.tags): continue if (PARTITION_SET_TAG not in run.tags): continue instance.add_run_tags(run.run_id, run.tags)
Utility method to build an asset key index from the data in existing event log records. Takes in event_log_storage, and a print_fn to keep track of progress.
python_modules/dagster/dagster/core/storage/runs/migration.py
migrate_run_partition
leftrightleft/dagster
0
python
def migrate_run_partition(instance, print_fn=None): '\n Utility method to build an asset key index from the data in existing event log records.\n Takes in event_log_storage, and a print_fn to keep track of progress.\n ' if print_fn: print_fn('Querying run storage.') runs = instance.get_runs() if print_fn: runs = tqdm(runs) for run in runs: if (PARTITION_NAME_TAG not in run.tags): continue if (PARTITION_SET_TAG not in run.tags): continue instance.add_run_tags(run.run_id, run.tags)
def migrate_run_partition(instance, print_fn=None): '\n Utility method to build an asset key index from the data in existing event log records.\n Takes in event_log_storage, and a print_fn to keep track of progress.\n ' if print_fn: print_fn('Querying run storage.') runs = instance.get_runs() if print_fn: runs = tqdm(runs) for run in runs: if (PARTITION_NAME_TAG not in run.tags): continue if (PARTITION_SET_TAG not in run.tags): continue instance.add_run_tags(run.run_id, run.tags)<|docstring|>Utility method to build an asset key index from the data in existing event log records. Takes in event_log_storage, and a print_fn to keep track of progress.<|endoftext|>
b95a339aa2fc65d1dda2cb097d479d0ab9de207f134b31e3032d4c5659ac8786
def null_to_empty(val): 'Convert to empty string if the value is currently null.' if (not val): return '' return val
Convert to empty string if the value is currently null.
nautobot_golden_config/models.py
null_to_empty
nniehoff/nautobot-plugin-golden-config
0
python
def null_to_empty(val): if (not val): return return val
def null_to_empty(val): if (not val): return return val<|docstring|>Convert to empty string if the value is currently null.<|endoftext|>
98a692d9edf4901bdaa340e764f9efa058f48c9fb88ee389e4ee885c229e601e
def to_csv(self): 'Indicates model fields to return as csv.' return (self.name, self.slug, self.description)
Indicates model fields to return as csv.
nautobot_golden_config/models.py
to_csv
nniehoff/nautobot-plugin-golden-config
0
python
def to_csv(self): return (self.name, self.slug, self.description)
def to_csv(self): return (self.name, self.slug, self.description)<|docstring|>Indicates model fields to return as csv.<|endoftext|>
93dea0603195bc1719fe27c77a776e967e40eca43d40851126837ffa057dbde3
def __str__(self): 'Return a sane string representation of the instance.' return self.slug
Return a sane string representation of the instance.
nautobot_golden_config/models.py
__str__
nniehoff/nautobot-plugin-golden-config
0
python
def __str__(self): return self.slug
def __str__(self): return self.slug<|docstring|>Return a sane string representation of the instance.<|endoftext|>
23b961d2151cfe9fd9891bce8bf5fb09f32feb88b418de8df644130076d478fb
def get_absolute_url(self): 'Absolute url for the ComplianceFeature instance.' return reverse('plugins:nautobot_golden_config:compliancefeature', args=[self.pk])
Absolute url for the ComplianceFeature instance.
nautobot_golden_config/models.py
get_absolute_url
nniehoff/nautobot-plugin-golden-config
0
python
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:compliancefeature', args=[self.pk])
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:compliancefeature', args=[self.pk])<|docstring|>Absolute url for the ComplianceFeature instance.<|endoftext|>
69e91246522d3c70bce22cdba471b5657829f34556abbc605dadeccbbea6428d
def to_csv(self): 'Indicates model fields to return as csv.' return (self.platform.slug, self.feature.name, self.description, self.config_ordered, self.match_config, self.config_type)
Indicates model fields to return as csv.
nautobot_golden_config/models.py
to_csv
nniehoff/nautobot-plugin-golden-config
0
python
def to_csv(self): return (self.platform.slug, self.feature.name, self.description, self.config_ordered, self.match_config, self.config_type)
def to_csv(self): return (self.platform.slug, self.feature.name, self.description, self.config_ordered, self.match_config, self.config_type)<|docstring|>Indicates model fields to return as csv.<|endoftext|>
2f44bf1a950b4479697c315150f5074b1f832048bf7467d40af79100dd5a4b49
def __str__(self): 'Return a sane string representation of the instance.' return f'{self.platform} - {self.feature.name}'
Return a sane string representation of the instance.
nautobot_golden_config/models.py
__str__
nniehoff/nautobot-plugin-golden-config
0
python
def __str__(self): return f'{self.platform} - {self.feature.name}'
def __str__(self): return f'{self.platform} - {self.feature.name}'<|docstring|>Return a sane string representation of the instance.<|endoftext|>
ef89129c3bb14ef6d8601cf4699d037e894d59ef9ba9c4be6a90fa04241778a1
def get_absolute_url(self): 'Absolute url for the ComplianceRule instance.' return reverse('plugins:nautobot_golden_config:compliancerule', args=[self.pk])
Absolute url for the ComplianceRule instance.
nautobot_golden_config/models.py
get_absolute_url
nniehoff/nautobot-plugin-golden-config
0
python
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:compliancerule', args=[self.pk])
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:compliancerule', args=[self.pk])<|docstring|>Absolute url for the ComplianceRule instance.<|endoftext|>
8502a11783a6c83a429a457e1049253dadcefa035d0c792e2a1470f447070086
def clean(self): 'Verify that if cli, then match_config is set.' if ((self.config_type == ComplianceRuleTypeChoice.TYPE_CLI) and (not self.match_config)): raise ValidationError('CLI configuration set, but no configuration set to match.')
Verify that if cli, then match_config is set.
nautobot_golden_config/models.py
clean
nniehoff/nautobot-plugin-golden-config
0
python
def clean(self): if ((self.config_type == ComplianceRuleTypeChoice.TYPE_CLI) and (not self.match_config)): raise ValidationError('CLI configuration set, but no configuration set to match.')
def clean(self): if ((self.config_type == ComplianceRuleTypeChoice.TYPE_CLI) and (not self.match_config)): raise ValidationError('CLI configuration set, but no configuration set to match.')<|docstring|>Verify that if cli, then match_config is set.<|endoftext|>
612ebcc72b7160068cd827e80c7c5055e8a21f5f196cb4c9e3c24a4093ca4119
def get_absolute_url(self): 'Return absolute URL for instance.' return reverse('plugins:nautobot_golden_config:configcompliance', args=[self.pk])
Return absolute URL for instance.
nautobot_golden_config/models.py
get_absolute_url
nniehoff/nautobot-plugin-golden-config
0
python
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:configcompliance', args=[self.pk])
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:configcompliance', args=[self.pk])<|docstring|>Return absolute URL for instance.<|endoftext|>
fcf4be6193325c5df7c2f9b7ca9c1ac964add8916fd42900d101096d97689a96
def to_csv(self): 'Indicates model fields to return as csv.' return (self.device.name, self.rule.feature.name, self.compliance)
Indicates model fields to return as csv.
nautobot_golden_config/models.py
to_csv
nniehoff/nautobot-plugin-golden-config
0
python
def to_csv(self): return (self.device.name, self.rule.feature.name, self.compliance)
def to_csv(self): return (self.device.name, self.rule.feature.name, self.compliance)<|docstring|>Indicates model fields to return as csv.<|endoftext|>
1065f45a4904e98dbba65f6d962106085717038de11e90b3a94f49a0d3b01bd2
def to_objectchange(self, action): 'Remove actual and intended configuration from changelog.' return ObjectChange(changed_object=self, object_repr=str(self), action=action, object_data=serialize_object(self, exclude=['actual', 'intended']))
Remove actual and intended configuration from changelog.
nautobot_golden_config/models.py
to_objectchange
nniehoff/nautobot-plugin-golden-config
0
python
def to_objectchange(self, action): return ObjectChange(changed_object=self, object_repr=str(self), action=action, object_data=serialize_object(self, exclude=['actual', 'intended']))
def to_objectchange(self, action): return ObjectChange(changed_object=self, object_repr=str(self), action=action, object_data=serialize_object(self, exclude=['actual', 'intended']))<|docstring|>Remove actual and intended configuration from changelog.<|endoftext|>
0f80579010b37b423f6df1ea7cf0f94492f6e69bcf571ae3e5ce2df7c77f4187
def __str__(self): 'String representation of the compliance.' return f'{self.device} -> {self.rule} -> {self.compliance}'
String representation of the compliance.
nautobot_golden_config/models.py
__str__
nniehoff/nautobot-plugin-golden-config
0
python
def __str__(self): return f'{self.device} -> {self.rule} -> {self.compliance}'
def __str__(self): return f'{self.device} -> {self.rule} -> {self.compliance}'<|docstring|>String representation of the compliance.<|endoftext|>
2e616c061e003959e32e02296c3777494ad50b1ff1447006957fa77230e8bfb1
def save(self, *args, **kwargs): 'Performs the actual compliance check.' feature = {'ordered': self.rule.config_ordered, 'name': self.rule} if (self.rule.config_type == ComplianceRuleTypeChoice.TYPE_JSON): feature.update({'section': self.rule.match_config}) diff = DeepDiff(self.actual, self.intended, ignore_order=self.ordered, report_repetition=True) if (not diff): self.compliance_int = 1 self.compliance = True self.missing = '' self.extra = '' else: self.compliance_int = 0 self.compliance = False self.missing = null_to_empty(self._normalize_diff(diff, 'added')) self.extra = null_to_empty(self._normalize_diff(diff, 'removed')) else: feature.update({'section': self.rule.match_config.splitlines()}) value = feature_compliance(feature, self.actual, self.intended, get_platform(self.device.platform.slug)) self.compliance = value['compliant'] if self.compliance: self.compliance_int = 1 else: self.compliance_int = 0 self.ordered = value['ordered_compliant'] self.missing = null_to_empty(value['missing']) self.extra = null_to_empty(value['extra']) super().save(*args, **kwargs)
Performs the actual compliance check.
nautobot_golden_config/models.py
save
nniehoff/nautobot-plugin-golden-config
0
python
def save(self, *args, **kwargs): feature = {'ordered': self.rule.config_ordered, 'name': self.rule} if (self.rule.config_type == ComplianceRuleTypeChoice.TYPE_JSON): feature.update({'section': self.rule.match_config}) diff = DeepDiff(self.actual, self.intended, ignore_order=self.ordered, report_repetition=True) if (not diff): self.compliance_int = 1 self.compliance = True self.missing = self.extra = else: self.compliance_int = 0 self.compliance = False self.missing = null_to_empty(self._normalize_diff(diff, 'added')) self.extra = null_to_empty(self._normalize_diff(diff, 'removed')) else: feature.update({'section': self.rule.match_config.splitlines()}) value = feature_compliance(feature, self.actual, self.intended, get_platform(self.device.platform.slug)) self.compliance = value['compliant'] if self.compliance: self.compliance_int = 1 else: self.compliance_int = 0 self.ordered = value['ordered_compliant'] self.missing = null_to_empty(value['missing']) self.extra = null_to_empty(value['extra']) super().save(*args, **kwargs)
def save(self, *args, **kwargs): feature = {'ordered': self.rule.config_ordered, 'name': self.rule} if (self.rule.config_type == ComplianceRuleTypeChoice.TYPE_JSON): feature.update({'section': self.rule.match_config}) diff = DeepDiff(self.actual, self.intended, ignore_order=self.ordered, report_repetition=True) if (not diff): self.compliance_int = 1 self.compliance = True self.missing = self.extra = else: self.compliance_int = 0 self.compliance = False self.missing = null_to_empty(self._normalize_diff(diff, 'added')) self.extra = null_to_empty(self._normalize_diff(diff, 'removed')) else: feature.update({'section': self.rule.match_config.splitlines()}) value = feature_compliance(feature, self.actual, self.intended, get_platform(self.device.platform.slug)) self.compliance = value['compliant'] if self.compliance: self.compliance_int = 1 else: self.compliance_int = 0 self.ordered = value['ordered_compliant'] self.missing = null_to_empty(value['missing']) self.extra = null_to_empty(value['extra']) super().save(*args, **kwargs)<|docstring|>Performs the actual compliance check.<|endoftext|>
44971dd84b5ecf745e730c85d2ae83dd88c88834f96ddd0ac46a1091f91bea00
@staticmethod def _normalize_diff(diff, path_to_diff): 'Normalizes the diff to a list of keys and list indexes that have changed.' dictionary_items = list(diff.get(f'dictionary_item_{path_to_diff}', [])) list_items = list(diff.get(f'iterable_item_{path_to_diff}', {}).keys()) values_changed = list(diff.get('values_changed', {}).keys()) type_changes = list(diff.get('type_changes', {}).keys()) return (((dictionary_items + list_items) + values_changed) + type_changes)
Normalizes the diff to a list of keys and list indexes that have changed.
nautobot_golden_config/models.py
_normalize_diff
nniehoff/nautobot-plugin-golden-config
0
python
@staticmethod def _normalize_diff(diff, path_to_diff): dictionary_items = list(diff.get(f'dictionary_item_{path_to_diff}', [])) list_items = list(diff.get(f'iterable_item_{path_to_diff}', {}).keys()) values_changed = list(diff.get('values_changed', {}).keys()) type_changes = list(diff.get('type_changes', {}).keys()) return (((dictionary_items + list_items) + values_changed) + type_changes)
@staticmethod def _normalize_diff(diff, path_to_diff): dictionary_items = list(diff.get(f'dictionary_item_{path_to_diff}', [])) list_items = list(diff.get(f'iterable_item_{path_to_diff}', {}).keys()) values_changed = list(diff.get('values_changed', {}).keys()) type_changes = list(diff.get('type_changes', {}).keys()) return (((dictionary_items + list_items) + values_changed) + type_changes)<|docstring|>Normalizes the diff to a list of keys and list indexes that have changed.<|endoftext|>
30f5dc7083332e8e5fee6b3e7ed0ae7ebba37a89ff177bda5df74230e9bb9e82
def to_csv(self): 'Indicates model fields to return as csv.' return (self.device, self.backup_last_attempt_date, self.backup_last_success_date, self.intended_last_attempt_date, self.intended_last_success_date, self.compliance_last_attempt_date, self.compliance_last_success_date)
Indicates model fields to return as csv.
nautobot_golden_config/models.py
to_csv
nniehoff/nautobot-plugin-golden-config
0
python
def to_csv(self): return (self.device, self.backup_last_attempt_date, self.backup_last_success_date, self.intended_last_attempt_date, self.intended_last_success_date, self.compliance_last_attempt_date, self.compliance_last_success_date)
def to_csv(self): return (self.device, self.backup_last_attempt_date, self.backup_last_success_date, self.intended_last_attempt_date, self.intended_last_success_date, self.compliance_last_attempt_date, self.compliance_last_success_date)<|docstring|>Indicates model fields to return as csv.<|endoftext|>
3874d0896041318e14e64ce0093c971129ea5bc7eaaf38fef0073d0a95734cca
def to_objectchange(self, action): 'Remove actual and intended configuration from changelog.' return ObjectChange(changed_object=self, object_repr=str(self), action=action, object_data=serialize_object(self, exclude=['backup_config', 'intended_config', 'compliance_config']))
Remove actual and intended configuration from changelog.
nautobot_golden_config/models.py
to_objectchange
nniehoff/nautobot-plugin-golden-config
0
python
def to_objectchange(self, action): return ObjectChange(changed_object=self, object_repr=str(self), action=action, object_data=serialize_object(self, exclude=['backup_config', 'intended_config', 'compliance_config']))
def to_objectchange(self, action): return ObjectChange(changed_object=self, object_repr=str(self), action=action, object_data=serialize_object(self, exclude=['backup_config', 'intended_config', 'compliance_config']))<|docstring|>Remove actual and intended configuration from changelog.<|endoftext|>
49898db18bea2a8f173c9e0204a4ebf0bef7bd5567e4fe1c4c6d452988dd00e9
def __str__(self): 'String representation of the compliance.' return f'{self.device}'
String representation of the compliance.
nautobot_golden_config/models.py
__str__
nniehoff/nautobot-plugin-golden-config
0
python
def __str__(self): return f'{self.device}'
def __str__(self): return f'{self.device}'<|docstring|>String representation of the compliance.<|endoftext|>
6fca937f44390a4530f8780796e8e27302d43563602a998fb4342ab12b306edc
def get_absolute_url(self): 'Return absolute URL for instance.' return reverse('plugins:nautobot_golden_config:goldenconfigsetting')
Return absolute URL for instance.
nautobot_golden_config/models.py
get_absolute_url
nniehoff/nautobot-plugin-golden-config
0
python
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:goldenconfigsetting')
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:goldenconfigsetting')<|docstring|>Return absolute URL for instance.<|endoftext|>
e96a4428ba892ebc9ccca4f5a433b58e78983880b2bd27d40da1b6e647bf5488
def __str__(self): 'Return a simple string if model is called.' return 'Golden Config Settings'
Return a simple string if model is called.
nautobot_golden_config/models.py
__str__
nniehoff/nautobot-plugin-golden-config
0
python
def __str__(self): return 'Golden Config Settings'
def __str__(self): return 'Golden Config Settings'<|docstring|>Return a simple string if model is called.<|endoftext|>
7711a8d7a4d88bbfab2fc62f3ea33f2c8f790266a046c01b1e8cfebaaad51ab0
def delete(self, *args, **kwargs): 'Enforce the singleton pattern; there is no way to delete the configurations.'
Enforce the singleton pattern; there is no way to delete the configurations.
nautobot_golden_config/models.py
delete
nniehoff/nautobot-plugin-golden-config
0
python
def delete(self, *args, **kwargs):
def delete(self, *args, **kwargs): <|docstring|>Enforce the singleton pattern; there is no way to delete the configurations.<|endoftext|>
545deb73673a500ea10a0627a19dbe539cf45e6b647d25b6f78e8c3b7322f1ef
@classmethod def load(cls): 'Enforce the singleton pattern, fail if somehow more than one instance exists.' if (len(cls.objects.all()) != 1): raise ValidationError('There was an error where more than one instance existed for a setting.') return cls.objects.first()
Enforce the singleton pattern, fail if somehow more than one instance exists.
nautobot_golden_config/models.py
load
nniehoff/nautobot-plugin-golden-config
0
python
@classmethod def load(cls): if (len(cls.objects.all()) != 1): raise ValidationError('There was an error where more than one instance existed for a setting.') return cls.objects.first()
@classmethod def load(cls): if (len(cls.objects.all()) != 1): raise ValidationError('There was an error where more than one instance existed for a setting.') return cls.objects.first()<|docstring|>Enforce the singleton pattern, fail if somehow more than one instance exists.<|endoftext|>
03c817d6eb2297bd0db5e5e2475c35839233aa92634047d6249766aaff4310bf
def clean(self): 'Validate there is only one model and if there is a GraphQL query, that it is valid.' super().clean() if self.sot_agg_query: try: LOGGER.debug('GraphQL - test query: `%s`', str(self.sot_agg_query)) backend = get_default_backend() schema = graphene_settings.SCHEMA backend.document_from_string(schema, str(self.sot_agg_query)) except GraphQLSyntaxError as error: raise ValidationError(str(error)) LOGGER.debug('GraphQL - test query start with: `%s`', GRAPHQL_STR_START) if (not str(self.sot_agg_query).startswith(GRAPHQL_STR_START)): raise ValidationError(f'The GraphQL query must start with exactly `{GRAPHQL_STR_START}`') if self.scope: filterset_class = get_filterset_for_model(Device) filterset = filterset_class(self.scope, Device.objects.all()) if filterset.errors: for key in filterset.errors: error_message = ', '.join(filterset.errors[key]) raise ValidationError({'scope': f'{key}: {error_message}'}) filterset_params = set(filterset.get_filters().keys()) for key in self.scope.keys(): if (key not in filterset_params): raise ValidationError({'scope': f"'{key}' is not a valid filter parameter for Device object"})
Validate there is only one model and if there is a GraphQL query, that it is valid.
nautobot_golden_config/models.py
clean
nniehoff/nautobot-plugin-golden-config
0
python
def clean(self): super().clean() if self.sot_agg_query: try: LOGGER.debug('GraphQL - test query: `%s`', str(self.sot_agg_query)) backend = get_default_backend() schema = graphene_settings.SCHEMA backend.document_from_string(schema, str(self.sot_agg_query)) except GraphQLSyntaxError as error: raise ValidationError(str(error)) LOGGER.debug('GraphQL - test query start with: `%s`', GRAPHQL_STR_START) if (not str(self.sot_agg_query).startswith(GRAPHQL_STR_START)): raise ValidationError(f'The GraphQL query must start with exactly `{GRAPHQL_STR_START}`') if self.scope: filterset_class = get_filterset_for_model(Device) filterset = filterset_class(self.scope, Device.objects.all()) if filterset.errors: for key in filterset.errors: error_message = ', '.join(filterset.errors[key]) raise ValidationError({'scope': f'{key}: {error_message}'}) filterset_params = set(filterset.get_filters().keys()) for key in self.scope.keys(): if (key not in filterset_params): raise ValidationError({'scope': f"'{key}' is not a valid filter parameter for Device object"})
def clean(self): super().clean() if self.sot_agg_query: try: LOGGER.debug('GraphQL - test query: `%s`', str(self.sot_agg_query)) backend = get_default_backend() schema = graphene_settings.SCHEMA backend.document_from_string(schema, str(self.sot_agg_query)) except GraphQLSyntaxError as error: raise ValidationError(str(error)) LOGGER.debug('GraphQL - test query start with: `%s`', GRAPHQL_STR_START) if (not str(self.sot_agg_query).startswith(GRAPHQL_STR_START)): raise ValidationError(f'The GraphQL query must start with exactly `{GRAPHQL_STR_START}`') if self.scope: filterset_class = get_filterset_for_model(Device) filterset = filterset_class(self.scope, Device.objects.all()) if filterset.errors: for key in filterset.errors: error_message = ', '.join(filterset.errors[key]) raise ValidationError({'scope': f'{key}: {error_message}'}) filterset_params = set(filterset.get_filters().keys()) for key in self.scope.keys(): if (key not in filterset_params): raise ValidationError({'scope': f"'{key}' is not a valid filter parameter for Device object"})<|docstring|>Validate there is only one model and if there is a GraphQL query, that it is valid.<|endoftext|>
ad7426cf53792cb840c09fa1af4347c6568ead730dd39b82371069d58fa98e5b
def get_queryset(self): 'Generate a Device QuerySet from the filter.' if (not self.scope): return Device.objects.all() filterset_class = get_filterset_for_model(Device) filterset = filterset_class(self.scope, Device.objects.all()) return filterset.qs
Generate a Device QuerySet from the filter.
nautobot_golden_config/models.py
get_queryset
nniehoff/nautobot-plugin-golden-config
0
python
def get_queryset(self): if (not self.scope): return Device.objects.all() filterset_class = get_filterset_for_model(Device) filterset = filterset_class(self.scope, Device.objects.all()) return filterset.qs
def get_queryset(self): if (not self.scope): return Device.objects.all() filterset_class = get_filterset_for_model(Device) filterset = filterset_class(self.scope, Device.objects.all()) return filterset.qs<|docstring|>Generate a Device QuerySet from the filter.<|endoftext|>
b635e1ae5cbd00910c0b6bca36e2e471890a55ec365a4edbfe9bbe0a7eb888dc
def device_count(self): 'Return the number of devices in the group.' return self.get_queryset().count()
Return the number of devices in the group.
nautobot_golden_config/models.py
device_count
nniehoff/nautobot-plugin-golden-config
0
python
def device_count(self): return self.get_queryset().count()
def device_count(self): return self.get_queryset().count()<|docstring|>Return the number of devices in the group.<|endoftext|>
2dd938d79a9759ae8e0bd821d4210d6653e86033c60a806752ad9606bb950572
def get_filter_as_string(self): 'Get filter as string.' if (not self.scope): return None result = '' for (key, value) in self.scope.items(): if isinstance(value, list): for item in value: if (result != ''): result += '&' result += f'{key}={item}' else: result += '&' result += f'{key}={value}' return result
Get filter as string.
nautobot_golden_config/models.py
get_filter_as_string
nniehoff/nautobot-plugin-golden-config
0
python
def get_filter_as_string(self): if (not self.scope): return None result = for (key, value) in self.scope.items(): if isinstance(value, list): for item in value: if (result != ): result += '&' result += f'{key}={item}' else: result += '&' result += f'{key}={value}' return result
def get_filter_as_string(self): if (not self.scope): return None result = for (key, value) in self.scope.items(): if isinstance(value, list): for item in value: if (result != ): result += '&' result += f'{key}={item}' else: result += '&' result += f'{key}={value}' return result<|docstring|>Get filter as string.<|endoftext|>
2f6789ef98cdf357680a5c304bffa76504732e448bde4b1b871e25bf3d163190
def get_url_to_filtered_device_list(self): 'Get url to all devices that are matching the filter.' base_url = reverse('dcim:device_list') filter_str = self.get_filter_as_string() if filter_str: return f'{base_url}?{filter_str}' return base_url
Get url to all devices that are matching the filter.
nautobot_golden_config/models.py
get_url_to_filtered_device_list
nniehoff/nautobot-plugin-golden-config
0
python
def get_url_to_filtered_device_list(self): base_url = reverse('dcim:device_list') filter_str = self.get_filter_as_string() if filter_str: return f'{base_url}?{filter_str}' return base_url
def get_url_to_filtered_device_list(self): base_url = reverse('dcim:device_list') filter_str = self.get_filter_as_string() if filter_str: return f'{base_url}?{filter_str}' return base_url<|docstring|>Get url to all devices that are matching the filter.<|endoftext|>
1df13d43e0a46b7ad1b58ed8b8313971216ff89cd9de08c0a6ddaaaf813aef32
def to_csv(self): 'Indicates model fields to return as csv.' return (self.name, self.platform.slug, self.regex)
Indicates model fields to return as csv.
nautobot_golden_config/models.py
to_csv
nniehoff/nautobot-plugin-golden-config
0
python
def to_csv(self): return (self.name, self.platform.slug, self.regex)
def to_csv(self): return (self.name, self.platform.slug, self.regex)<|docstring|>Indicates model fields to return as csv.<|endoftext|>
d19470e85127c01ff23c1c46775e9cc25120c40a79205b70e6f68b570cd11c56
def __str__(self): 'Return a simple string if model is called.' return self.name
Return a simple string if model is called.
nautobot_golden_config/models.py
__str__
nniehoff/nautobot-plugin-golden-config
0
python
def __str__(self): return self.name
def __str__(self): return self.name<|docstring|>Return a simple string if model is called.<|endoftext|>
930abf51b3e71ef61eb2b509aab412d9d3cf900bdb736e05be0c34dbb87d53b3
def get_absolute_url(self): 'Return absolute URL for instance.' return reverse('plugins:nautobot_golden_config:configremove', args=[self.pk])
Return absolute URL for instance.
nautobot_golden_config/models.py
get_absolute_url
nniehoff/nautobot-plugin-golden-config
0
python
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:configremove', args=[self.pk])
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:configremove', args=[self.pk])<|docstring|>Return absolute URL for instance.<|endoftext|>
0c1b5daf1c9c610745eeb7a2f8ad907fe6a1019c5754585c6d9482027f3919d1
def to_csv(self): 'Indicates model fields to return as csv.' return (self.name, self.platform.slug, self.regex, self.replace)
Indicates model fields to return as csv.
nautobot_golden_config/models.py
to_csv
nniehoff/nautobot-plugin-golden-config
0
python
def to_csv(self): return (self.name, self.platform.slug, self.regex, self.replace)
def to_csv(self): return (self.name, self.platform.slug, self.regex, self.replace)<|docstring|>Indicates model fields to return as csv.<|endoftext|>
45dc5c726fca5af373248dfb28587c74aed4574c7dfcdd5dea7d281b3b49247c
def get_absolute_url(self): 'Return absolute URL for instance.' return reverse('plugins:nautobot_golden_config:configreplace', args=[self.pk])
Return absolute URL for instance.
nautobot_golden_config/models.py
get_absolute_url
nniehoff/nautobot-plugin-golden-config
0
python
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:configreplace', args=[self.pk])
def get_absolute_url(self): return reverse('plugins:nautobot_golden_config:configreplace', args=[self.pk])<|docstring|>Return absolute URL for instance.<|endoftext|>
d19470e85127c01ff23c1c46775e9cc25120c40a79205b70e6f68b570cd11c56
def __str__(self): 'Return a simple string if model is called.' return self.name
Return a simple string if model is called.
nautobot_golden_config/models.py
__str__
nniehoff/nautobot-plugin-golden-config
0
python
def __str__(self): return self.name
def __str__(self): return self.name<|docstring|>Return a simple string if model is called.<|endoftext|>
709f7dade87f33167b0df3026b7cc6b3b7a07cc00d36b761f09119f6f29cfc94
def handle(self, *args, **options): 'Automatically called when the load_questions command is given.' base_path = settings.QUESTIONS_BASE_PATH questions_structure_file = 'questions.yaml' factory = LoaderFactory() factory.create_questions_loader(structure_filename=questions_structure_file, base_path=base_path).load()
Automatically called when the load_questions command is given.
codewof/programming/management/commands/load_questions.py
handle
jimbonothing64/codewof
3
python
def handle(self, *args, **options): base_path = settings.QUESTIONS_BASE_PATH questions_structure_file = 'questions.yaml' factory = LoaderFactory() factory.create_questions_loader(structure_filename=questions_structure_file, base_path=base_path).load()
def handle(self, *args, **options): base_path = settings.QUESTIONS_BASE_PATH questions_structure_file = 'questions.yaml' factory = LoaderFactory() factory.create_questions_loader(structure_filename=questions_structure_file, base_path=base_path).load()<|docstring|>Automatically called when the load_questions command is given.<|endoftext|>
40d9ccdd1353fca8abdd72421d78bbcbe8310ddd247fd4093731b38a13c0a3fa
@property def do_store(self): 'Whether this literal should be stored for use by future backreference chunks.\n \n See the documentation of the `backreference_body` type for details about backreference chunks.\n ' if hasattr(self, '_m_do_store'): return (self._m_do_store if hasattr(self, '_m_do_store') else None) self._m_do_store = ((self.tag & 16) != 0) return (self._m_do_store if hasattr(self, '_m_do_store') else None)
Whether this literal should be stored for use by future backreference chunks. See the documentation of the `backreference_body` type for details about backreference chunks.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
do_store
Mahlet-Inc/Hobbits
304
python
@property def do_store(self): 'Whether this literal should be stored for use by future backreference chunks.\n \n See the documentation of the `backreference_body` type for details about backreference chunks.\n ' if hasattr(self, '_m_do_store'): return (self._m_do_store if hasattr(self, '_m_do_store') else None) self._m_do_store = ((self.tag & 16) != 0) return (self._m_do_store if hasattr(self, '_m_do_store') else None)
@property def do_store(self): 'Whether this literal should be stored for use by future backreference chunks.\n \n See the documentation of the `backreference_body` type for details about backreference chunks.\n ' if hasattr(self, '_m_do_store'): return (self._m_do_store if hasattr(self, '_m_do_store') else None) self._m_do_store = ((self.tag & 16) != 0) return (self._m_do_store if hasattr(self, '_m_do_store') else None)<|docstring|>Whether this literal should be stored for use by future backreference chunks. See the documentation of the `backreference_body` type for details about backreference chunks.<|endoftext|>
4a8ab5b6300690d4eee47c820e0de7a687e06599dd1e3feaaf1e19f746633bd3
@property def len_literal_div2(self): 'The length of the literal data,\n in two-byte units.\n \n In practice,\n this value is always greater than zero,\n as there is no use in storing a zero-length literal.\n ' if hasattr(self, '_m_len_literal_div2'): return (self._m_len_literal_div2 if hasattr(self, '_m_len_literal_div2') else None) self._m_len_literal_div2 = (self.len_literal_div2_separate if self.is_len_literal_div2_separate else self.len_literal_div2_in_tag) return (self._m_len_literal_div2 if hasattr(self, '_m_len_literal_div2') else None)
The length of the literal data, in two-byte units. In practice, this value is always greater than zero, as there is no use in storing a zero-length literal.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
len_literal_div2
Mahlet-Inc/Hobbits
304
python
@property def len_literal_div2(self): 'The length of the literal data,\n in two-byte units.\n \n In practice,\n this value is always greater than zero,\n as there is no use in storing a zero-length literal.\n ' if hasattr(self, '_m_len_literal_div2'): return (self._m_len_literal_div2 if hasattr(self, '_m_len_literal_div2') else None) self._m_len_literal_div2 = (self.len_literal_div2_separate if self.is_len_literal_div2_separate else self.len_literal_div2_in_tag) return (self._m_len_literal_div2 if hasattr(self, '_m_len_literal_div2') else None)
@property def len_literal_div2(self): 'The length of the literal data,\n in two-byte units.\n \n In practice,\n this value is always greater than zero,\n as there is no use in storing a zero-length literal.\n ' if hasattr(self, '_m_len_literal_div2'): return (self._m_len_literal_div2 if hasattr(self, '_m_len_literal_div2') else None) self._m_len_literal_div2 = (self.len_literal_div2_separate if self.is_len_literal_div2_separate else self.len_literal_div2_in_tag) return (self._m_len_literal_div2 if hasattr(self, '_m_len_literal_div2') else None)<|docstring|>The length of the literal data, in two-byte units. In practice, this value is always greater than zero, as there is no use in storing a zero-length literal.<|endoftext|>
7e6d359983f7833dd12c7cc71f8d067fc3860b89baa03dde1624490918e4854a
@property def len_literal(self): 'The length of the literal data,\n in bytes.\n ' if hasattr(self, '_m_len_literal'): return (self._m_len_literal if hasattr(self, '_m_len_literal') else None) self._m_len_literal = (self.len_literal_div2 * 2) return (self._m_len_literal if hasattr(self, '_m_len_literal') else None)
The length of the literal data, in bytes.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
len_literal
Mahlet-Inc/Hobbits
304
python
@property def len_literal(self): 'The length of the literal data,\n in bytes.\n ' if hasattr(self, '_m_len_literal'): return (self._m_len_literal if hasattr(self, '_m_len_literal') else None) self._m_len_literal = (self.len_literal_div2 * 2) return (self._m_len_literal if hasattr(self, '_m_len_literal') else None)
@property def len_literal(self): 'The length of the literal data,\n in bytes.\n ' if hasattr(self, '_m_len_literal'): return (self._m_len_literal if hasattr(self, '_m_len_literal') else None) self._m_len_literal = (self.len_literal_div2 * 2) return (self._m_len_literal if hasattr(self, '_m_len_literal') else None)<|docstring|>The length of the literal data, in bytes.<|endoftext|>
37f5a17496cda9eff0aac8d6374e12dea858a5506550617b91732ad0e4ee93f9
@property def len_literal_div2_in_tag(self): 'The part of the tag byte that indicates the length of the literal data,\n in two-byte units.\n If this value is 0,\n the length is stored in a separate byte after the tag byte and before the literal data.\n ' if hasattr(self, '_m_len_literal_div2_in_tag'): return (self._m_len_literal_div2_in_tag if hasattr(self, '_m_len_literal_div2_in_tag') else None) self._m_len_literal_div2_in_tag = (self.tag & 15) return (self._m_len_literal_div2_in_tag if hasattr(self, '_m_len_literal_div2_in_tag') else None)
The part of the tag byte that indicates the length of the literal data, in two-byte units. If this value is 0, the length is stored in a separate byte after the tag byte and before the literal data.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
len_literal_div2_in_tag
Mahlet-Inc/Hobbits
304
python
@property def len_literal_div2_in_tag(self): 'The part of the tag byte that indicates the length of the literal data,\n in two-byte units.\n If this value is 0,\n the length is stored in a separate byte after the tag byte and before the literal data.\n ' if hasattr(self, '_m_len_literal_div2_in_tag'): return (self._m_len_literal_div2_in_tag if hasattr(self, '_m_len_literal_div2_in_tag') else None) self._m_len_literal_div2_in_tag = (self.tag & 15) return (self._m_len_literal_div2_in_tag if hasattr(self, '_m_len_literal_div2_in_tag') else None)
@property def len_literal_div2_in_tag(self): 'The part of the tag byte that indicates the length of the literal data,\n in two-byte units.\n If this value is 0,\n the length is stored in a separate byte after the tag byte and before the literal data.\n ' if hasattr(self, '_m_len_literal_div2_in_tag'): return (self._m_len_literal_div2_in_tag if hasattr(self, '_m_len_literal_div2_in_tag') else None) self._m_len_literal_div2_in_tag = (self.tag & 15) return (self._m_len_literal_div2_in_tag if hasattr(self, '_m_len_literal_div2_in_tag') else None)<|docstring|>The part of the tag byte that indicates the length of the literal data, in two-byte units. If this value is 0, the length is stored in a separate byte after the tag byte and before the literal data.<|endoftext|>
474dc58f35658b34f13bc41f17516987c53f0bbac7fb50076e38b68cb8f21869
@property def is_len_literal_div2_separate(self): 'Whether the length of the literal is stored separately from the tag.\n ' if hasattr(self, '_m_is_len_literal_div2_separate'): return (self._m_is_len_literal_div2_separate if hasattr(self, '_m_is_len_literal_div2_separate') else None) self._m_is_len_literal_div2_separate = (self.len_literal_div2_in_tag == 0) return (self._m_is_len_literal_div2_separate if hasattr(self, '_m_is_len_literal_div2_separate') else None)
Whether the length of the literal is stored separately from the tag.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
is_len_literal_div2_separate
Mahlet-Inc/Hobbits
304
python
@property def is_len_literal_div2_separate(self): '\n ' if hasattr(self, '_m_is_len_literal_div2_separate'): return (self._m_is_len_literal_div2_separate if hasattr(self, '_m_is_len_literal_div2_separate') else None) self._m_is_len_literal_div2_separate = (self.len_literal_div2_in_tag == 0) return (self._m_is_len_literal_div2_separate if hasattr(self, '_m_is_len_literal_div2_separate') else None)
@property def is_len_literal_div2_separate(self): '\n ' if hasattr(self, '_m_is_len_literal_div2_separate'): return (self._m_is_len_literal_div2_separate if hasattr(self, '_m_is_len_literal_div2_separate') else None) self._m_is_len_literal_div2_separate = (self.len_literal_div2_in_tag == 0) return (self._m_is_len_literal_div2_separate if hasattr(self, '_m_is_len_literal_div2_separate') else None)<|docstring|>Whether the length of the literal is stored separately from the tag.<|endoftext|>
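Taken together, the literal-chunk fields above all decode from a single tag byte; a worked sketch with a made-up tag value:

    # Worked sketch of the literal tag-byte fields (the tag value 0x13 is made up).
    tag = 0x13
    do_store = (tag & 0x10) != 0                                   # True: later backreferences may target this literal
    len_literal_div2_in_tag = tag & 0x0f                           # 3
    is_len_literal_div2_separate = (len_literal_div2_in_tag == 0)  # False: no separate length byte follows
    len_literal_div2 = len_literal_div2_in_tag                     # would come from the separate byte otherwise
    len_literal = len_literal_div2 * 2                             # 6 bytes of literal data follow the tag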
10477e6a7f9f45518d68fba6f8e3ec6bcaf5b4ca20a20ed206c85de0aa729a96
@property def is_index_separate(self): 'Whether the index is stored separately from the tag.\n ' if hasattr(self, '_m_is_index_separate'): return (self._m_is_index_separate if hasattr(self, '_m_is_index_separate') else None) self._m_is_index_separate = ((self.tag >= 32) and (self.tag <= 34)) return (self._m_is_index_separate if hasattr(self, '_m_is_index_separate') else None)
Whether the index is stored separately from the tag.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
is_index_separate
Mahlet-Inc/Hobbits
304
python
@property def is_index_separate(self): '\n ' if hasattr(self, '_m_is_index_separate'): return (self._m_is_index_separate if hasattr(self, '_m_is_index_separate') else None) self._m_is_index_separate = ((self.tag >= 32) and (self.tag <= 34)) return (self._m_is_index_separate if hasattr(self, '_m_is_index_separate') else None)
@property def is_index_separate(self): '\n ' if hasattr(self, '_m_is_index_separate'): return (self._m_is_index_separate if hasattr(self, '_m_is_index_separate') else None) self._m_is_index_separate = ((self.tag >= 32) and (self.tag <= 34)) return (self._m_is_index_separate if hasattr(self, '_m_is_index_separate') else None)<|docstring|>Whether the index is stored separately from the tag.<|endoftext|>
ac0294dd560df19b16115b375b1becad7ac95254be4d1e18bd8e4ea85436d273
@property def index_in_tag(self): 'The index of the referenced literal chunk,\n as stored in the tag byte.\n ' if hasattr(self, '_m_index_in_tag'): return (self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None) self._m_index_in_tag = (self.tag - 35) return (self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None)
The index of the referenced literal chunk, as stored in the tag byte.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
index_in_tag
Mahlet-Inc/Hobbits
304
python
@property def index_in_tag(self): 'The index of the referenced literal chunk,\n as stored in the tag byte.\n ' if hasattr(self, '_m_index_in_tag'): return (self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None) self._m_index_in_tag = (self.tag - 35) return (self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None)
@property def index_in_tag(self): 'The index of the referenced literal chunk,\n as stored in the tag byte.\n ' if hasattr(self, '_m_index_in_tag'): return (self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None) self._m_index_in_tag = (self.tag - 35) return (self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None)<|docstring|>The index of the referenced literal chunk, as stored in the tag byte.<|endoftext|>
a367ec0e380b61f46dc9525701d0fd7418a8eec3b1e884668f9622830bd80355
@property def index_separate(self): 'The index of the referenced literal chunk,\n as stored separately from the tag byte,\n with the implicit offset corrected for.\n ' if hasattr(self, '_m_index_separate'): return (self._m_index_separate if hasattr(self, '_m_index_separate') else None) if self.is_index_separate: self._m_index_separate = ((self.index_separate_minus + 40) + (256 if (self.tag == 33) else 0)) return (self._m_index_separate if hasattr(self, '_m_index_separate') else None)
The index of the referenced literal chunk, as stored separately from the tag byte, with the implicit offset corrected for.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
index_separate
Mahlet-Inc/Hobbits
304
python
@property def index_separate(self): 'The index of the referenced literal chunk,\n as stored separately from the tag byte,\n with the implicit offset corrected for.\n ' if hasattr(self, '_m_index_separate'): return (self._m_index_separate if hasattr(self, '_m_index_separate') else None) if self.is_index_separate: self._m_index_separate = ((self.index_separate_minus + 40) + (256 if (self.tag == 33) else 0)) return (self._m_index_separate if hasattr(self, '_m_index_separate') else None)
@property def index_separate(self): 'The index of the referenced literal chunk,\n as stored separately from the tag byte,\n with the implicit offset corrected for.\n ' if hasattr(self, '_m_index_separate'): return (self._m_index_separate if hasattr(self, '_m_index_separate') else None) if self.is_index_separate: self._m_index_separate = ((self.index_separate_minus + 40) + (256 if (self.tag == 33) else 0)) return (self._m_index_separate if hasattr(self, '_m_index_separate') else None)<|docstring|>The index of the referenced literal chunk, as stored separately from the tag byte, with the implicit offset corrected for.<|endoftext|>
1e448596ebb6a8750d1b65708422a30522c8e6358d8ff74d91fae3f3a58f0c8c
@property def index(self): 'The index of the referenced literal chunk.\n \n Stored literals are assigned index numbers in the order in which they appear in the compressed data,\n starting at 0.\n Non-stored literals are not counted in the numbering and cannot be referenced using backreferences.\n Once an index is assigned to a stored literal,\n it is never changed or unassigned for the entire length of the compressed data.\n \n As the name indicates,\n a backreference can only reference stored literal chunks found *before* the backreference,\n not ones that come after it.\n ' if hasattr(self, '_m_index'): return (self._m_index if hasattr(self, '_m_index') else None) self._m_index = (self.index_separate if self.is_index_separate else self.index_in_tag) return (self._m_index if hasattr(self, '_m_index') else None)
The index of the referenced literal chunk. Stored literals are assigned index numbers in the order in which they appear in the compressed data, starting at 0. Non-stored literals are not counted in the numbering and cannot be referenced using backreferences. Once an index is assigned to a stored literal, it is never changed or unassigned for the entire length of the compressed data. As the name indicates, a backreference can only reference stored literal chunks found *before* the backreference, not ones that come after it.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
index
Mahlet-Inc/Hobbits
304
python
@property def index(self): 'The index of the referenced literal chunk.\n \n Stored literals are assigned index numbers in the order in which they appear in the compressed data,\n starting at 0.\n Non-stored literals are not counted in the numbering and cannot be referenced using backreferences.\n Once an index is assigned to a stored literal,\n it is never changed or unassigned for the entire length of the compressed data.\n \n As the name indicates,\n a backreference can only reference stored literal chunks found *before* the backreference,\n not ones that come after it.\n ' if hasattr(self, '_m_index'): return (self._m_index if hasattr(self, '_m_index') else None) self._m_index = (self.index_separate if self.is_index_separate else self.index_in_tag) return (self._m_index if hasattr(self, '_m_index') else None)
@property def index(self): 'The index of the referenced literal chunk.\n \n Stored literals are assigned index numbers in the order in which they appear in the compressed data,\n starting at 0.\n Non-stored literals are not counted in the numbering and cannot be referenced using backreferences.\n Once an index is assigned to a stored literal,\n it is never changed or unassigned for the entire length of the compressed data.\n \n As the name indicates,\n a backreference can only reference stored literal chunks found *before* the backreference,\n not ones that come after it.\n ' if hasattr(self, '_m_index'): return (self._m_index if hasattr(self, '_m_index') else None) self._m_index = (self.index_separate if self.is_index_separate else self.index_in_tag) return (self._m_index if hasattr(self, '_m_index') else None)<|docstring|>The index of the referenced literal chunk. Stored literals are assigned index numbers in the order in which they appear in the compressed data, starting at 0. Non-stored literals are not counted in the numbering and cannot be referenced using backreferences. Once an index is assigned to a stored literal, it is never changed or unassigned for the entire length of the compressed data. As the name indicates, a backreference can only reference stored literal chunks found *before* the backreference, not ones that come after it.<|endoftext|>
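A worked sketch of the backreference index rules above (tag and byte values are made up):

    # Case 1: index packed into the tag byte itself (tags >= 0x23).
    tag = 0x28
    is_index_separate = 0x20 <= tag <= 0x22    # False
    index = tag - 35                           # 5: the sixth stored literal (indices start at 0)

    # Case 2: index stored in a byte after the tag (tags 0x20..0x22).
    tag = 0x21
    index_separate_minus = 0x0a                # hypothetical byte read after the tag
    index = index_separate_minus + 40 + (256 if tag == 0x21 else 0)   # 10 + 40 + 256 = 306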
d4c4c5640a82f52ff1a234979f60add79fc8e7e109c73c089109f1aad4425fd8
@property def lookup_table(self): 'Fixed lookup table that maps tag byte numbers to two bytes each.\n \n The entries in the lookup table are offset -\n index 0 stands for tag 0x4b, 1 for 0x4c, etc.\n ' if hasattr(self, '_m_lookup_table'): return (self._m_lookup_table if hasattr(self, '_m_lookup_table') else None) self._m_lookup_table = [b'\x00\x00', b'N\xba', b'\x00\x08', b'Nu', b'\x00\x0c', b'N\xad', b' S', b'/\x0b', b'a\x00', b'\x00\x10', b'p\x00', b'/\x00', b'Hn', b' P', b' n', b'/.', b'\xff\xfc', b'H\xe7', b'?<', b'\x00\x04', b'\xff\xf8', b'/\x0c', b' \x06', b'N\xed', b'NV', b' h', b'N^', b'\x00\x01', b'X\x8f', b'O\xef', b'\x00\x02', b'\x00\x18', b'`\x00', b'\xff\xff', b'P\x8f', b'N\x90', b'\x00\x06', b'&n', b'\x00\x14', b'\xff\xf4', b'L\xee', b'\x00\n', b'\x00\x0e', b'A\xee', b'L\xdf', b'H\xc0', b'\xff\xf0', b'-@', b'\x00\x12', b'0.', b'p\x01', b'/(', b' T', b'g\x00', b'\x00 ', b'\x00\x1c', b' _', b'\x18\x00', b'&o', b'Hx', b'\x00\x16', b'A\xfa', b'0<', b'(@', b'r\x00', b'(n', b' \x0c', b'f\x00', b' k', b'/\x07', b'U\x8f', b'\x00(', b'\xff\xfe', b'\xff\xec', b'"\xd8', b' \x0b', b'\x00\x0f', b'Y\x8f', b'/<', b'\xff\x00', b'\x01\x18', b'\x81\xe1', b'J\x00', b'N\xb0', b'\xff\xe8', b'H\xc7', b'\x00\x03', b'\x00"', b'\x00\x07', b'\x00\x1a', b'g\x06', b'g\x08', b'N\xf9', b'\x00$', b' x', b'\x08\x00', b'f\x04', b'\x00*', b'N\xd0', b'0(', b'&_', b'g\x04', b'\x000', b'C\xee', b'?\x00', b' \x1f', b'\x00\x1e', b'\xff\xf6', b' .', b'B\xa7', b' \x07', b'\xff\xfa', b'`\x02', b'=@', b'\x0c@', b'f\x06', b'\x00&', b'-H', b'/\x01', b'p\xff', b'`\x04', b'\x18\x80', b'J@', b'\x00@', b'\x00,', b'/\x08', b'\x00\x11', b'\xff\xe4', b'!@', b'&@', b'\xff\xf2', b'Bn', b'N\xb9', b'=|', b'\x008', b'\x00\r', b'`\x06', b'B.', b' <', b'g\x0c', b'-h', b'f\x08', b'J.', b'J\xae', b'\x00.', b'H@', b'"_', b'"\x00', b'g\n', b'0\x07', b'Bg', b'\x002', b' (', b'\x00\t', b'Hz', b'\x02\x00', b'/+', b'\x00\x05', b'"n', b'f\x02', b'\xe5\x80', b'g\x0e', b'f\n', b'\x00P', b'>\x00', b'f\x0c', b'.\x00', b'\xff\xee', b' m', b' @', b'\xff\xe0', b'S@', b'`\x08', b'\x04\x80', b'\x00h', b'\x0b|', b'D\x00', b'A\xe8', b'HA'] return (self._m_lookup_table if hasattr(self, '_m_lookup_table') else None)
Fixed lookup table that maps tag byte numbers to two bytes each. The entries in the lookup table are offset - index 0 stands for tag 0x4b, 1 for 0x4c, etc.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
lookup_table
Mahlet-Inc/Hobbits
304
python
@property def lookup_table(self): 'Fixed lookup table that maps tag byte numbers to two bytes each.\n \n The entries in the lookup table are offset -\n index 0 stands for tag 0x4b, 1 for 0x4c, etc.\n ' if hasattr(self, '_m_lookup_table'): return (self._m_lookup_table if hasattr(self, '_m_lookup_table') else None) self._m_lookup_table = [b'\x00\x00', b'N\xba', b'\x00\x08', b'Nu', b'\x00\x0c', b'N\xad', b' S', b'/\x0b', b'a\x00', b'\x00\x10', b'p\x00', b'/\x00', b'Hn', b' P', b' n', b'/.', b'\xff\xfc', b'H\xe7', b'?<', b'\x00\x04', b'\xff\xf8', b'/\x0c', b' \x06', b'N\xed', b'NV', b' h', b'N^', b'\x00\x01', b'X\x8f', b'O\xef', b'\x00\x02', b'\x00\x18', b'`\x00', b'\xff\xff', b'P\x8f', b'N\x90', b'\x00\x06', b'&n', b'\x00\x14', b'\xff\xf4', b'L\xee', b'\x00\n', b'\x00\x0e', b'A\xee', b'L\xdf', b'H\xc0', b'\xff\xf0', b'-@', b'\x00\x12', b'0.', b'p\x01', b'/(', b' T', b'g\x00', b'\x00 ', b'\x00\x1c', b' _', b'\x18\x00', b'&o', b'Hx', b'\x00\x16', b'A\xfa', b'0<', b'(@', b'r\x00', b'(n', b' \x0c', b'f\x00', b' k', b'/\x07', b'U\x8f', b'\x00(', b'\xff\xfe', b'\xff\xec', b'"\xd8', b' \x0b', b'\x00\x0f', b'Y\x8f', b'/<', b'\xff\x00', b'\x01\x18', b'\x81\xe1', b'J\x00', b'N\xb0', b'\xff\xe8', b'H\xc7', b'\x00\x03', b'\x00"', b'\x00\x07', b'\x00\x1a', b'g\x06', b'g\x08', b'N\xf9', b'\x00$', b' x', b'\x08\x00', b'f\x04', b'\x00*', b'N\xd0', b'0(', b'&_', b'g\x04', b'\x000', b'C\xee', b'?\x00', b' \x1f', b'\x00\x1e', b'\xff\xf6', b' .', b'B\xa7', b' \x07', b'\xff\xfa', b'`\x02', b'=@', b'\x0c@', b'f\x06', b'\x00&', b'-H', b'/\x01', b'p\xff', b'`\x04', b'\x18\x80', b'J@', b'\x00@', b'\x00,', b'/\x08', b'\x00\x11', b'\xff\xe4', b'!@', b'&@', b'\xff\xf2', b'Bn', b'N\xb9', b'=|', b'\x008', b'\x00\r', b'`\x06', b'B.', b' <', b'g\x0c', b'-h', b'f\x08', b'J.', b'J\xae', b'\x00.', b'H@', b'"_', b'"\x00', b'g\n', b'0\x07', b'Bg', b'\x002', b' (', b'\x00\t', b'Hz', b'\x02\x00', b'/+', b'\x00\x05', b'"n', b'f\x02', b'\xe5\x80', b'g\x0e', b'f\n', b'\x00P', b'>\x00', b'f\x0c', b'.\x00', b'\xff\xee', b' m', b' @', b'\xff\xe0', b'S@', b'`\x08', b'\x04\x80', b'\x00h', b'\x0b|', b'D\x00', b'A\xe8', b'HA'] return (self._m_lookup_table if hasattr(self, '_m_lookup_table') else None)
@property def lookup_table(self): 'Fixed lookup table that maps tag byte numbers to two bytes each.\n \n The entries in the lookup table are offset -\n index 0 stands for tag 0x4b, 1 for 0x4c, etc.\n ' if hasattr(self, '_m_lookup_table'): return (self._m_lookup_table if hasattr(self, '_m_lookup_table') else None) self._m_lookup_table = [b'\x00\x00', b'N\xba', b'\x00\x08', b'Nu', b'\x00\x0c', b'N\xad', b' S', b'/\x0b', b'a\x00', b'\x00\x10', b'p\x00', b'/\x00', b'Hn', b' P', b' n', b'/.', b'\xff\xfc', b'H\xe7', b'?<', b'\x00\x04', b'\xff\xf8', b'/\x0c', b' \x06', b'N\xed', b'NV', b' h', b'N^', b'\x00\x01', b'X\x8f', b'O\xef', b'\x00\x02', b'\x00\x18', b'`\x00', b'\xff\xff', b'P\x8f', b'N\x90', b'\x00\x06', b'&n', b'\x00\x14', b'\xff\xf4', b'L\xee', b'\x00\n', b'\x00\x0e', b'A\xee', b'L\xdf', b'H\xc0', b'\xff\xf0', b'-@', b'\x00\x12', b'0.', b'p\x01', b'/(', b' T', b'g\x00', b'\x00 ', b'\x00\x1c', b' _', b'\x18\x00', b'&o', b'Hx', b'\x00\x16', b'A\xfa', b'0<', b'(@', b'r\x00', b'(n', b' \x0c', b'f\x00', b' k', b'/\x07', b'U\x8f', b'\x00(', b'\xff\xfe', b'\xff\xec', b'"\xd8', b' \x0b', b'\x00\x0f', b'Y\x8f', b'/<', b'\xff\x00', b'\x01\x18', b'\x81\xe1', b'J\x00', b'N\xb0', b'\xff\xe8', b'H\xc7', b'\x00\x03', b'\x00"', b'\x00\x07', b'\x00\x1a', b'g\x06', b'g\x08', b'N\xf9', b'\x00$', b' x', b'\x08\x00', b'f\x04', b'\x00*', b'N\xd0', b'0(', b'&_', b'g\x04', b'\x000', b'C\xee', b'?\x00', b' \x1f', b'\x00\x1e', b'\xff\xf6', b' .', b'B\xa7', b' \x07', b'\xff\xfa', b'`\x02', b'=@', b'\x0c@', b'f\x06', b'\x00&', b'-H', b'/\x01', b'p\xff', b'`\x04', b'\x18\x80', b'J@', b'\x00@', b'\x00,', b'/\x08', b'\x00\x11', b'\xff\xe4', b'!@', b'&@', b'\xff\xf2', b'Bn', b'N\xb9', b'=|', b'\x008', b'\x00\r', b'`\x06', b'B.', b' <', b'g\x0c', b'-h', b'f\x08', b'J.', b'J\xae', b'\x00.', b'H@', b'"_', b'"\x00', b'g\n', b'0\x07', b'Bg', b'\x002', b' (', b'\x00\t', b'Hz', b'\x02\x00', b'/+', b'\x00\x05', b'"n', b'f\x02', b'\xe5\x80', b'g\x0e', b'f\n', b'\x00P', b'>\x00', b'f\x0c', b'.\x00', b'\xff\xee', b' m', b' @', b'\xff\xe0', b'S@', b'`\x08', b'\x04\x80', b'\x00h', b'\x0b|', b'D\x00', b'A\xe8', b'HA'] return (self._m_lookup_table if hasattr(self, '_m_lookup_table') else None)<|docstring|>Fixed lookup table that maps tag byte numbers to two bytes each. The entries in the lookup table are offset - index 0 stands for tag 0x4b, 1 for 0x4c, etc.<|endoftext|>
d8626b3a97eb94a3542cfb4e8f8d7760d292b4934de09a5ad64bfddcec5617fc
@property def value(self): 'The two bytes that the tag byte expands to,\n based on the fixed lookup table.\n ' if hasattr(self, '_m_value'): return (self._m_value if hasattr(self, '_m_value') else None) self._m_value = self.lookup_table[(self.tag - 75)] return (self._m_value if hasattr(self, '_m_value') else None)
The two bytes that the tag byte expands to, based on the fixed lookup table.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
value
Mahlet-Inc/Hobbits
304
python
@property def value(self): 'The two bytes that the tag byte expands to,\n based on the fixed lookup table.\n ' if hasattr(self, '_m_value'): return (self._m_value if hasattr(self, '_m_value') else None) self._m_value = self.lookup_table[(self.tag - 75)] return (self._m_value if hasattr(self, '_m_value') else None)
@property def value(self): 'The two bytes that the tag byte expands to,\n based on the fixed lookup table.\n ' if hasattr(self, '_m_value'): return (self._m_value if hasattr(self, '_m_value') else None) self._m_value = self.lookup_table[(self.tag - 75)] return (self._m_value if hasattr(self, '_m_value') else None)<|docstring|>The two bytes that the tag byte expands to, based on the fixed lookup table.<|endoftext|>
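A quick check of the offset handling in value, using the table listed above (the tag is made up): the table is indexed by tag - 0x4b, so tag 0x4b expands to the first entry.

    tag = 0x4e
    expanded = lookup_table[tag - 0x4b]   # fourth entry: b'Nu', i.e. 0x4E 0x75 (the 68k RTS opcode)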
a92ba0f3ecbf76b6eae30fb1c26ce60d3e413940ee183d60742d9b93f8478486
@property def segment_number(self): 'The segment number for all of the generated jump table entries.\n \n Although it is stored as a variable-length integer,\n the segment number must be in the range `0x0 <= x <= 0xffff`,\n i. e. an unsigned 16-bit integer.\n ' if hasattr(self, '_m_segment_number'): return (self._m_segment_number if hasattr(self, '_m_segment_number') else None) self._m_segment_number = self.segment_number_raw.value return (self._m_segment_number if hasattr(self, '_m_segment_number') else None)
The segment number for all of the generated jump table entries. Although it is stored as a variable-length integer, the segment number must be in the range `0x0 <= x <= 0xffff`, i. e. an unsigned 16-bit integer.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
segment_number
Mahlet-Inc/Hobbits
304
python
@property def segment_number(self): 'The segment number for all of the generated jump table entries.\n \n Although it is stored as a variable-length integer,\n the segment number must be in the range `0x0 <= x <= 0xffff`,\n i. e. an unsigned 16-bit integer.\n ' if hasattr(self, '_m_segment_number'): return (self._m_segment_number if hasattr(self, '_m_segment_number') else None) self._m_segment_number = self.segment_number_raw.value return (self._m_segment_number if hasattr(self, '_m_segment_number') else None)
@property def segment_number(self): 'The segment number for all of the generated jump table entries.\n \n Although it is stored as a variable-length integer,\n the segment number must be in the range `0x0 <= x <= 0xffff`,\n i. e. an unsigned 16-bit integer.\n ' if hasattr(self, '_m_segment_number'): return (self._m_segment_number if hasattr(self, '_m_segment_number') else None) self._m_segment_number = self.segment_number_raw.value return (self._m_segment_number if hasattr(self, '_m_segment_number') else None)<|docstring|>The segment number for all of the generated jump table entries. Although it is stored as a variable-length integer, the segment number must be in the range `0x0 <= x <= 0xffff`, i. e. an unsigned 16-bit integer.<|endoftext|>
3359f4b80f9ab86d9fbf57e33756e11d3dee9bd39d7d5613bdd5a9020ae5b5ce
@property def num_addresses(self): 'The number of addresses stored in this chunk.\n \n This number must be greater than 0.\n ' if hasattr(self, '_m_num_addresses'): return (self._m_num_addresses if hasattr(self, '_m_num_addresses') else None) self._m_num_addresses = self.num_addresses_raw.value return (self._m_num_addresses if hasattr(self, '_m_num_addresses') else None)
The number of addresses stored in this chunk. This number must be greater than 0.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
num_addresses
Mahlet-Inc/Hobbits
304
python
@property def num_addresses(self): 'The number of addresses stored in this chunk.\n \n This number must be greater than 0.\n ' if hasattr(self, '_m_num_addresses'): return (self._m_num_addresses if hasattr(self, '_m_num_addresses') else None) self._m_num_addresses = self.num_addresses_raw.value return (self._m_num_addresses if hasattr(self, '_m_num_addresses') else None)
@property def num_addresses(self): 'The number of addresses stored in this chunk.\n \n This number must be greater than 0.\n ' if hasattr(self, '_m_num_addresses'): return (self._m_num_addresses if hasattr(self, '_m_num_addresses') else None) self._m_num_addresses = self.num_addresses_raw.value return (self._m_num_addresses if hasattr(self, '_m_num_addresses') else None)<|docstring|>The number of addresses stored in this chunk. This number must be greater than 0.<|endoftext|>
fd39e3207312d93dc66af3bd4cc58adf3268d2c28c0eab38a120b42a93f1735e
@property def byte_count(self): 'The length in bytes of the value to be repeated.\n Regardless of the byte count,\n the value to be repeated is stored as a variable-length integer.\n ' if hasattr(self, '_m_byte_count'): return (self._m_byte_count if hasattr(self, '_m_byte_count') else None) self._m_byte_count = (1 if (self.tag == 2) else (2 if (self.tag == 3) else (- 1))) return (self._m_byte_count if hasattr(self, '_m_byte_count') else None)
The length in bytes of the value to be repeated. Regardless of the byte count, the value to be repeated is stored as a variable-length integer.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
byte_count
Mahlet-Inc/Hobbits
304
python
@property def byte_count(self): 'The length in bytes of the value to be repeated.\n Regardless of the byte count,\n the value to be repeated is stored as a variable-length integer.\n ' if hasattr(self, '_m_byte_count'): return (self._m_byte_count if hasattr(self, '_m_byte_count') else None) self._m_byte_count = (1 if (self.tag == 2) else (2 if (self.tag == 3) else (- 1))) return (self._m_byte_count if hasattr(self, '_m_byte_count') else None)
@property def byte_count(self): 'The length in bytes of the value to be repeated.\n Regardless of the byte count,\n the value to be repeated is stored as a variable-length integer.\n ' if hasattr(self, '_m_byte_count'): return (self._m_byte_count if hasattr(self, '_m_byte_count') else None) self._m_byte_count = (1 if (self.tag == 2) else (2 if (self.tag == 3) else (- 1))) return (self._m_byte_count if hasattr(self, '_m_byte_count') else None)<|docstring|>The length in bytes of the value to be repeated. Regardless of the byte count, the value to be repeated is stored as a variable-length integer.<|endoftext|>
7496707606e23207e4e825119630a0af9cf410b1468d6b1b3a875138f8440642
@property def to_repeat(self): 'The value to repeat.\n \n Although it is stored as a variable-length integer,\n this value must fit into an unsigned big-endian integer that is as long as `byte_count`,\n i. e. either 8 or 16 bits.\n ' if hasattr(self, '_m_to_repeat'): return (self._m_to_repeat if hasattr(self, '_m_to_repeat') else None) self._m_to_repeat = self.to_repeat_raw.value return (self._m_to_repeat if hasattr(self, '_m_to_repeat') else None)
The value to repeat. Although it is stored as a variable-length integer, this value must fit into an unsigned big-endian integer that is as long as `byte_count`, i. e. either 8 or 16 bits.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
to_repeat
Mahlet-Inc/Hobbits
304
python
@property def to_repeat(self): 'The value to repeat.\n \n Although it is stored as a variable-length integer,\n this value must fit into an unsigned big-endian integer that is as long as `byte_count`,\n i. e. either 8 or 16 bits.\n ' if hasattr(self, '_m_to_repeat'): return (self._m_to_repeat if hasattr(self, '_m_to_repeat') else None) self._m_to_repeat = self.to_repeat_raw.value return (self._m_to_repeat if hasattr(self, '_m_to_repeat') else None)
@property def to_repeat(self): 'The value to repeat.\n \n Although it is stored as a variable-length integer,\n this value must fit into an unsigned big-endian integer that is as long as `byte_count`,\n i. e. either 8 or 16 bits.\n ' if hasattr(self, '_m_to_repeat'): return (self._m_to_repeat if hasattr(self, '_m_to_repeat') else None) self._m_to_repeat = self.to_repeat_raw.value return (self._m_to_repeat if hasattr(self, '_m_to_repeat') else None)<|docstring|>The value to repeat. Although it is stored as a variable-length integer, this value must fit into an unsigned big-endian integer that is as long as `byte_count`, i. e. either 8 or 16 bits.<|endoftext|>
50f5b2821f36800399db77ccd872c5c593a26fa440c283327790f2dac5650d15
@property def repeat_count_m1(self): 'The number of times to repeat the value,\n minus one.\n \n This value must not be negative.\n ' if hasattr(self, '_m_repeat_count_m1'): return (self._m_repeat_count_m1 if hasattr(self, '_m_repeat_count_m1') else None) self._m_repeat_count_m1 = self.repeat_count_m1_raw.value return (self._m_repeat_count_m1 if hasattr(self, '_m_repeat_count_m1') else None)
The number of times to repeat the value, minus one. This value must not be negative.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
repeat_count_m1
Mahlet-Inc/Hobbits
304
python
@property def repeat_count_m1(self): 'The number of times to repeat the value,\n minus one.\n \n This value must not be negative.\n ' if hasattr(self, '_m_repeat_count_m1'): return (self._m_repeat_count_m1 if hasattr(self, '_m_repeat_count_m1') else None) self._m_repeat_count_m1 = self.repeat_count_m1_raw.value return (self._m_repeat_count_m1 if hasattr(self, '_m_repeat_count_m1') else None)
@property def repeat_count_m1(self): 'The number of times to repeat the value,\n minus one.\n \n This value must not be negative.\n ' if hasattr(self, '_m_repeat_count_m1'): return (self._m_repeat_count_m1 if hasattr(self, '_m_repeat_count_m1') else None) self._m_repeat_count_m1 = self.repeat_count_m1_raw.value return (self._m_repeat_count_m1 if hasattr(self, '_m_repeat_count_m1') else None)<|docstring|>The number of times to repeat the value, minus one. This value must not be negative.<|endoftext|>
5144fce3a570db307328bce19d9bce79a8653a7c7b8b387ada02bc9a7aa8201d
@property def repeat_count(self): 'The number of times to repeat the value.\n \n This value must be positive.\n ' if hasattr(self, '_m_repeat_count'): return (self._m_repeat_count if hasattr(self, '_m_repeat_count') else None) self._m_repeat_count = (self.repeat_count_m1 + 1) return (self._m_repeat_count if hasattr(self, '_m_repeat_count') else None)
The number of times to repeat the value. This value must be positive.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
repeat_count
Mahlet-Inc/Hobbits
304
python
@property def repeat_count(self): 'The number of times to repeat the value.\n \n This value must be positive.\n ' if hasattr(self, '_m_repeat_count'): return (self._m_repeat_count if hasattr(self, '_m_repeat_count') else None) self._m_repeat_count = (self.repeat_count_m1 + 1) return (self._m_repeat_count if hasattr(self, '_m_repeat_count') else None)
@property def repeat_count(self): 'The number of times to repeat the value.\n \n This value must be positive.\n ' if hasattr(self, '_m_repeat_count'): return (self._m_repeat_count if hasattr(self, '_m_repeat_count') else None) self._m_repeat_count = (self.repeat_count_m1 + 1) return (self._m_repeat_count if hasattr(self, '_m_repeat_count') else None)<|docstring|>The number of times to repeat the value. This value must be positive.<|endoftext|>
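Putting the repeat-chunk fields together, a worked sketch with made-up values:

    # Expansion of a hypothetical repeat chunk.
    tag = 0x02
    byte_count = 1 if tag == 2 else (2 if tag == 3 else -1)   # 1-byte value (tag 3 would mean 2 bytes)
    to_repeat = 0x4e                                          # decoded from a variable-length integer
    repeat_count_m1 = 4                                       # decoded from a variable-length integer
    repeat_count = repeat_count_m1 + 1                        # 5
    output = to_repeat.to_bytes(byte_count, 'big') * repeat_count   # b'NNNNN': 5 bytes of output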
82032f547c8af6117db0e6ede4744cf90bde7a30ac1efdd88add7b9c6dd9f2a8
@property def first_value(self): 'The first value in the sequence.\n \n Although it is stored as a variable-length integer,\n this value must be in the range `-0x8000 <= x <= 0x7fff`,\n i. e. a signed 16-bit integer.\n ' if hasattr(self, '_m_first_value'): return (self._m_first_value if hasattr(self, '_m_first_value') else None) self._m_first_value = self.first_value_raw.value return (self._m_first_value if hasattr(self, '_m_first_value') else None)
The first value in the sequence. Although it is stored as a variable-length integer, this value must be in the range `-0x8000 <= x <= 0x7fff`, i. e. a signed 16-bit integer.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
first_value
Mahlet-Inc/Hobbits
304
python
@property def first_value(self): 'The first value in the sequence.\n \n Although it is stored as a variable-length integer,\n this value must be in the range `-0x8000 <= x <= 0x7fff`,\n i. e. a signed 16-bit integer.\n ' if hasattr(self, '_m_first_value'): return (self._m_first_value if hasattr(self, '_m_first_value') else None) self._m_first_value = self.first_value_raw.value return (self._m_first_value if hasattr(self, '_m_first_value') else None)
@property def first_value(self): 'The first value in the sequence.\n \n Although it is stored as a variable-length integer,\n this value must be in the range `-0x8000 <= x <= 0x7fff`,\n i. e. a signed 16-bit integer.\n ' if hasattr(self, '_m_first_value'): return (self._m_first_value if hasattr(self, '_m_first_value') else None) self._m_first_value = self.first_value_raw.value return (self._m_first_value if hasattr(self, '_m_first_value') else None)<|docstring|>The first value in the sequence. Although it is stored as a variable-length integer, this value must be in the range `-0x8000 <= x <= 0x7fff`, i. e. a signed 16-bit integer.<|endoftext|>
71a3f490e72503c63246e85400100fc1e167910177cfc87d68b7067006340dd1
@property def num_deltas(self): 'The number of deltas stored in this chunk.\n \n This number must not be negative.\n ' if hasattr(self, '_m_num_deltas'): return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None) self._m_num_deltas = self.num_deltas_raw.value return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None)
The number of deltas stored in this chunk. This number must not be negative.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
num_deltas
Mahlet-Inc/Hobbits
304
python
@property def num_deltas(self): 'The number of deltas stored in this chunk.\n \n This number must not be negative.\n ' if hasattr(self, '_m_num_deltas'): return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None) self._m_num_deltas = self.num_deltas_raw.value return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None)
@property def num_deltas(self): 'The number of deltas stored in this chunk.\n \n This number must not be negative.\n ' if hasattr(self, '_m_num_deltas'): return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None) self._m_num_deltas = self.num_deltas_raw.value return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None)<|docstring|>The number of deltas stored in this chunk. This number must not be negative.<|endoftext|>
2278a1c9efd192ef85fa5fa8c7692e82f7eb2e14bd157000f62fa3b45277dc43
@property def first_value(self): 'The first value in the sequence.\n ' if hasattr(self, '_m_first_value'): return (self._m_first_value if hasattr(self, '_m_first_value') else None) self._m_first_value = self.first_value_raw.value return (self._m_first_value if hasattr(self, '_m_first_value') else None)
The first value in the sequence.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
first_value
Mahlet-Inc/Hobbits
304
python
@property def first_value(self): '\n ' if hasattr(self, '_m_first_value'): return (self._m_first_value if hasattr(self, '_m_first_value') else None) self._m_first_value = self.first_value_raw.value return (self._m_first_value if hasattr(self, '_m_first_value') else None)
@property def first_value(self): '\n ' if hasattr(self, '_m_first_value'): return (self._m_first_value if hasattr(self, '_m_first_value') else None) self._m_first_value = self.first_value_raw.value return (self._m_first_value if hasattr(self, '_m_first_value') else None)<|docstring|>The first value in the sequence.<|endoftext|>
71a3f490e72503c63246e85400100fc1e167910177cfc87d68b7067006340dd1
@property def num_deltas(self): 'The number of deltas stored in this chunk.\n \n This number must not be negative.\n ' if hasattr(self, '_m_num_deltas'): return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None) self._m_num_deltas = self.num_deltas_raw.value return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None)
The number of deltas stored in this chunk. This number must not be negative.
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/macos/resource_compression/dcmp_0.py
num_deltas
Mahlet-Inc/Hobbits
304
python
@property def num_deltas(self): 'The number of deltas stored in this chunk.\n \n This number must not be negative.\n ' if hasattr(self, '_m_num_deltas'): return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None) self._m_num_deltas = self.num_deltas_raw.value return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None)
@property def num_deltas(self): 'The number of deltas stored in this chunk.\n \n This number must not be negative.\n ' if hasattr(self, '_m_num_deltas'): return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None) self._m_num_deltas = self.num_deltas_raw.value return (self._m_num_deltas if hasattr(self, '_m_num_deltas') else None)<|docstring|>The number of deltas stored in this chunk. This number must not be negative.<|endoftext|>
669f979aa7638e92e5498d27e20d7832f1443561fd11e02e4767c66cea93d327
def parse_number(x): 'Parse a number from a string.' return round(float(x), 6)
Parse a number from a string.
strongsup/tables/structure.py
parse_number
Verose/Decomposable-Attention-Model-for-Semantic-Parsing
72
python
def parse_number(x): return round(float(x), 6)
def parse_number(x): return round(float(x), 6)<|docstring|>Parse a number from a string.<|endoftext|>
3029935944e950d06ac41022b24f3f99e489578e63a99858bcf74ca3590a1f1f
def parse_date(x): 'Parse a date from a string with format yy-mm-dd.' x = x.split('-') assert (len(x) == 3), 'Not a valid date: {}'.format(x) year = ((- 1) if (x[0][0].lower() == 'x') else int(x[0])) month = ((- 1) if (x[1][0].lower() == 'x') else int(x[1])) day = ((- 1) if (x[2][0].lower() == 'x') else int(x[2])) return Date(year, month, day)
Parse a date from a string with format yy-mm-dd.
strongsup/tables/structure.py
parse_date
Verose/Decomposable-Attention-Model-for-Semantic-Parsing
72
python
def parse_date(x): x = x.split('-') assert (len(x) == 3), 'Not a valid date: {}'.format(x) year = ((- 1) if (x[0][0].lower() == 'x') else int(x[0])) month = ((- 1) if (x[1][0].lower() == 'x') else int(x[1])) day = ((- 1) if (x[2][0].lower() == 'x') else int(x[2])) return Date(year, month, day)
def parse_date(x): x = x.split('-') assert (len(x) == 3), 'Not a valid date: {}'.format(x) year = ((- 1) if (x[0][0].lower() == 'x') else int(x[0])) month = ((- 1) if (x[1][0].lower() == 'x') else int(x[1])) day = ((- 1) if (x[2][0].lower() == 'x') else int(x[2])) return Date(year, month, day)<|docstring|>Parse a date from a string with format yy-mm-dd.<|endoftext|>
457992056c63d814e208308df08404c06c46037854f0ed123f37f41dcf4088b4
def parse_value(x): 'Parse the string, which may be a number, a date, or a non-numeric string.' try: return parse_number(x) except: try: return parse_date(x) except: return x
Parse the string, which may be a number, a date, or a non-numeric string.
strongsup/tables/structure.py
parse_value
Verose/Decomposable-Attention-Model-for-Semantic-Parsing
72
python
def parse_value(x): try: return parse_number(x) except: try: return parse_date(x) except: return x
def parse_value(x): try: return parse_number(x) except: try: return parse_date(x) except: return x<|docstring|>Parse the string, which may be a number, a date, or a non-numeric string.<|endoftext|>
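Illustrative calls to the three parsers above (inputs are made up; Date is the date type used elsewhere in this module):

    parse_number('3.1415927')        # -> 3.141593 (rounded to 6 decimal places)
    parse_date('xx-07-04')           # -> Date(-1, 7, 4): an 'x' field means unknown
    parse_value('2001-07-04')        # -> Date(2001, 7, 4): not a number, so parsed as a date
    parse_value('fb:cell.new_york')  # -> 'fb:cell.new_york': neither number nor date, returned unchanged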
9a2bb48e48a6e269685ed71a1a69a19974afaaa063121cc611758662ef459c94
def get_type(x): 'Return the type signature of x. Used to prevent comparison across types.' if isinstance(x, float): return 'N' elif isinstance(x, Date): return 'D' elif isinstance(x, basestring): if (not x.startswith('fb:')): raise ValueError('NameValue does not start with "fb:": {}'.format(x)) tokens = x.split('.') if (len(tokens) != 2): raise ValueError('{} is not an entity'.format(x)) return tokens[0] else: raise ValueError('Unknown type for {}'.format(type(x)))
Return the type signature of x. Used to prevent comparison across types.
strongsup/tables/structure.py
get_type
Verose/Decomposable-Attention-Model-for-Semantic-Parsing
72
python
def get_type(x): if isinstance(x, float): return 'N' elif isinstance(x, Date): return 'D' elif isinstance(x, basestring): if (not x.startswith('fb:')): raise ValueError('NameValue does not start with "fb:": {}'.format(x)) tokens = x.split('.') if (len(tokens) != 2): raise ValueError('{} is not an entity'.format(x)) return tokens[0] else: raise ValueError('Unknown type for {}'.format(type(x)))
def get_type(x): if isinstance(x, float): return 'N' elif isinstance(x, Date): return 'D' elif isinstance(x, basestring): if (not x.startswith('fb:')): raise ValueError('NameValue does not start with "fb:": {}'.format(x)) tokens = x.split('.') if (len(tokens) != 2): raise ValueError('{} is not an entity'.format(x)) return tokens[0] else: raise ValueError('Unknown type for {}'.format(type(x)))<|docstring|>Return the type signature of x. Used to prevent comparison across types.<|endoftext|>
62a854474b16128e1c9ab989cd0c84d09c00275fdcc44a261df7784d417bbfd1
def ensure_same_type(collection, allowed_types=None): 'Ensure that all values in the collection have the same type.\n Return the agreed type. Throw an error if the type is not agreed.\n\n Args:\n collection: A set or a dict where values are sets.\n allowed_types: Restriction on the agreed type.\n Can be a string, a collection of strings, or None (= allow all).\n Returns:\n The agreed type\n Throws:\n ValueError if one of the following happens:\n - The collection is not a set or a set-valued dict\n - The collection is empty\n - Some two items have different types\n - Some item does not agree with the allowed types (if specified)\n ' if isinstance(collection, set): itr = iter(collection) elif isinstance(collection, dict): itr = (x for v in collection.itervalues() for x in v) else: raise ValueError('Bad data type: {}'.format(type(collection))) if (allowed_types and isinstance(allowed_types, basestring)): allowed_types = [allowed_types] agreed_type = None for value in itr: if (agreed_type is None): agreed_type = get_type(value) if ((allowed_types is not None) and (agreed_type not in allowed_types)): raise ValueError('Type {} is not in allowed types {}'.format(agreed_type, allowed_types)) else: t = get_type(value) if (t != agreed_type): raise ValueError('Value {} does not have agreed type {}'.format(value, agreed_type)) if (agreed_type is None): raise ValueError('The collection is empty: {}'.format(collection)) return agreed_type
Ensure that all values in the collection have the same type. Return the agreed type. Throw an error if the type is not agreed. Args: collection: A set or a dict where values are sets. allowed_types: Restriction on the agreed type. Can be a string, a collection of strings, or None (= allow all). Returns: The agreed type Throws: ValueError if one of the following happens: - The collection is not a set or a set-valued dict - The collection is empty - Some two items have different types - Some item does not agree with the allowed types (if specified)
strongsup/tables/structure.py
ensure_same_type
Verose/Decomposable-Attention-Model-for-Semantic-Parsing
72
python
def ensure_same_type(collection, allowed_types=None): 'Ensure that all values in the collection have the same type.\n Return the agreed type. Throw an error if the type is not agreed.\n\n Args:\n collection: A set or a dict where values are sets.\n allowed_types: Restriction on the agreed type.\n Can be a string, a collection of strings, or None (= allow all).\n Returns:\n The agreed type\n Throws:\n ValueError if one of the following happens:\n - The collection is not a set or a set-valued dict\n - The collection is empty\n - Some two items have different types\n - Some item does not agree with the allowed types (if specified)\n ' if isinstance(collection, set): itr = iter(collection) elif isinstance(collection, dict): itr = (x for v in collection.itervalues() for x in v) else: raise ValueError('Bad data type: {}'.format(type(collection))) if (allowed_types and isinstance(allowed_types, basestring)): allowed_types = [allowed_types] agreed_type = None for value in itr: if (agreed_type is None): agreed_type = get_type(value) if ((allowed_types is not None) and (agreed_type not in allowed_types)): raise ValueError('Type {} is not in allowed types {}'.format(agreed_type, allowed_types)) else: t = get_type(value) if (t != agreed_type): raise ValueError('Value {} does not have agreed type {}'.format(value, agreed_type)) if (agreed_type is None): raise ValueError('The collection is empty: {}'.format(collection)) return agreed_type
def ensure_same_type(collection, allowed_types=None): 'Ensure that all values in the collection have the same type.\n Return the agreed type. Throw an error if the type is not agreed.\n\n Args:\n collection: A set or a dict where values are sets.\n allowed_types: Restriction on the agreed type.\n Can be a string, a collection of strings, or None (= allow all).\n Returns:\n The agreed type\n Throws:\n ValueError if one of the following happens:\n - The collection is not a set or a set-valued dict\n - The collection is empty\n - Some two items have different types\n - Some item does not agree with the allowed types (if specified)\n ' if isinstance(collection, set): itr = iter(collection) elif isinstance(collection, dict): itr = (x for v in collection.itervalues() for x in v) else: raise ValueError('Bad data type: {}'.format(type(collection))) if (allowed_types and isinstance(allowed_types, basestring)): allowed_types = [allowed_types] agreed_type = None for value in itr: if (agreed_type is None): agreed_type = get_type(value) if ((allowed_types is not None) and (agreed_type not in allowed_types)): raise ValueError('Type {} is not in allowed types {}'.format(agreed_type, allowed_types)) else: t = get_type(value) if (t != agreed_type): raise ValueError('Value {} does not have agreed type {}'.format(value, agreed_type)) if (agreed_type is None): raise ValueError('The collection is empty: {}'.format(collection)) return agreed_type<|docstring|>Ensure that all values in the collection have the same type. Return the agreed type. Throw an error if the type is not agreed. Args: collection: A set or a dict where values are sets. allowed_types: Restriction on the agreed type. Can be a string, a collection of strings, or None (= allow all). Returns: The agreed type Throws: ValueError if one of the following happens: - The collection is not a set or a set-valued dict - The collection is empty - Some two items have different types - Some item does not agree with the allowed types (if specified)<|endoftext|>
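Illustrative calls (values are made up; the module targets Python 2, hence basestring and itervalues in the bodies above):

    ensure_same_type({1.0, 2.5, 100.0})                      # -> 'N': every element is a number
    ensure_same_type({'fb:row.r1', 'fb:row.r2'}, 'fb:row')   # -> 'fb:row': entities of the allowed type
    ensure_same_type({1.0, 'fb:row.r1'})                     # raises ValueError: mixed types
    ensure_same_type(set())                                  # raises ValueError: empty collection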
2f61cbdbb4c58470a8385909f60c4a064efc23c99fc6c3b71c09ad554db93f0c
def parse_program(tokens): '\n program:\n | top_statement program\n | top_statement\n ' functions = {} top_statements = StatementList() while tokens.peek(): if tokens.consume_maybe('function'): function_declaration = parse_function_declaration(tokens) functions[function_declaration.name] = function_declaration else: statement = parse_statement(tokens) top_statements.append(statement) return (functions, top_statements)
program: | top_statement program | top_statement
parser/parser.py
parse_program
JamesWP/anzu
0
python
def parse_program(tokens): '\n program:\n | top_statement program\n | top_statement\n ' functions = {} top_statements = StatementList() while tokens.peek(): if tokens.consume_maybe('function'): function_declaration = parse_function_declaration(tokens) functions[function_declaration.name] = function_declaration else: statement = parse_statement(tokens) top_statements.append(statement) return (functions, top_statements)
def parse_program(tokens): '\n program:\n | top_statement program\n | top_statement\n ' functions = {} top_statements = StatementList() while tokens.peek(): if tokens.consume_maybe('function'): function_declaration = parse_function_declaration(tokens) functions[function_declaration.name] = function_declaration else: statement = parse_statement(tokens) top_statements.append(statement) return (functions, top_statements)<|docstring|>program: | top_statement program | top_statement<|endoftext|>
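The parser records in this group all drive a token stream through four methods: peek, consume, consume_maybe, and consume_only. The anzu repository presumably ships its own tokenizer; the class below is only a hypothetical sketch of an interface that satisfies those calls, followed by a small driver for parse_program (it assumes the module's Statement, StatementList, and FunctionDeclaration classes are importable alongside it):

class TokenStream:
    # Hypothetical token source matching the calls made by the parse_* functions.
    def __init__(self, tokens):
        self.tokens = list(tokens)
        self.pos = 0

    def peek(self):
        # Next token without consuming it; None once the input is exhausted.
        return self.tokens[self.pos] if self.pos < len(self.tokens) else None

    def consume(self):
        # Return the next token and advance past it.
        token = self.tokens[self.pos]
        self.pos += 1
        return token

    def consume_maybe(self, expected):
        # Consume the next token only if it equals `expected`; report whether it did.
        if self.peek() == expected:
            self.pos += 1
            return True
        return False

    def consume_only(self, expected):
        # Consume the next token and fail loudly if it is not `expected`.
        token = self.consume()
        if token != expected:
            raise SyntaxError('expected {!r}, got {!r}'.format(expected, token))

# Hypothetical driver: one function declaration followed by one top-level statement.
tokens = TokenStream(['function', 'twice', '1', '1', 'body_stmt', 'end', 'top_stmt'])
functions, top_statements = parse_program(tokens)
# functions maps 'twice' to its FunctionDeclaration; top_statements holds the
# Statement built from 'top_stmt'.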
82a8a733fdef9c0d18947a312e723f8fbc721bb4154ba72cf2a06978df385109
def parse_function_declaration(tokens): "\n function_declaration:\n | identifier num_literal num_literal statement_list 'end'\n | identifier num_literal num_literal 'end'\n " name = tokens.consume() num_input_args = int(tokens.consume()) num_output_args = int(tokens.consume()) function = FunctionDeclaration(name, num_input_args, num_output_args) if tokens.consume_maybe('end'): return function function.body = parse_statement_list(tokens) tokens.consume_only('end') return function
function_declaration: | identifier num_literal num_literal statement_list 'end' | identifier num_literal num_literal 'end'
parser/parser.py
parse_function_declaration
JamesWP/anzu
0
python
def parse_function_declaration(tokens): "\n function_declaration:\n | identifier num_literal num_literal statement_list 'end'\n | identifier num_literal num_literal 'end'\n " name = tokens.consume() num_input_args = int(tokens.consume()) num_output_args = int(tokens.consume()) function = FunctionDeclaration(name, num_input_args, num_output_args) if tokens.consume_maybe('end'): return function function.body = parse_statement_list(tokens) tokens.consume_only('end') return function
def parse_function_declaration(tokens): "\n function_declaration:\n | identifier num_literal num_literal statement_list 'end'\n | identifier num_literal num_literal 'end'\n " name = tokens.consume() num_input_args = int(tokens.consume()) num_output_args = int(tokens.consume()) function = FunctionDeclaration(name, num_input_args, num_output_args) if tokens.consume_maybe('end'): return function function.body = parse_statement_list(tokens) tokens.consume_only('end') return function<|docstring|>function_declaration: | identifier num_literal num_literal statement_list 'end' | identifier num_literal num_literal 'end'<|endoftext|>
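parse_function_declaration is entered only after parse_program has consumed the leading 'function' token, so a hypothetical call using the TokenStream sketch above starts at the function name:

decl = parse_function_declaration(TokenStream(['twice', '1', '1', 'body_stmt', 'end']))
# decl.name is 'twice' and decl.body is the StatementList holding the single 'body_stmt'
# statement; a declaration with an empty body would end immediately: ['twice', '1', '1', 'end'].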
dab2c92ce2c6eba37eb37a1025fbb25cfe8cb4cafa54307e5563306a6e052ccd
def parse_statement_list(tokens): '\n statement_list:\n | statement\n | statement statement_list\n ' statement_list = StatementList() while True: statement = parse_statement(tokens) statement_list.append(statement) if (tokens.peek() in ['end', 'elif', 'do']): return statement_list
statement_list: | statement | statement statement_list
parser/parser.py
parse_statement_list
JamesWP/anzu
0
python
def parse_statement_list(tokens): '\n statement_list:\n | statement\n | statement statement_list\n ' statement_list = StatementList() while True: statement = parse_statement(tokens) statement_list.append(statement) if (tokens.peek() in ['end', 'elif', 'do']): return statement_list
def parse_statement_list(tokens): '\n statement_list:\n | statement\n | statement statement_list\n ' statement_list = StatementList() while True: statement = parse_statement(tokens) statement_list.append(statement) if (tokens.peek() in ['end', 'elif', 'do']): return statement_list<|docstring|>statement_list: | statement | statement statement_list<|endoftext|>
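parse_statement_list never consumes the terminator itself: it stops as soon as the next token is 'end', 'elif', or 'do' and leaves that token for the caller. A small illustration with the TokenStream sketch above:

stream = TokenStream(['a', 'b', 'end'])
stmts = parse_statement_list(stream)  # collects the Statements built from 'a' and 'b'
stream.peek()                         # 'end' is still there for the caller to consume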
653b3605075f9421b36d83b0dcb86fad18ea6f900e21c31ac0b0353c57b807ec
def parse_statement(tokens): "\n statement:\n | 'while' statement_list 'do' statement_list 'end'\n | 'while' statement_list 'do' 'end'\n | 'if' if_body\n | num_literal\n | string_literal\n | builtin\n | identifier\n " if tokens.consume_maybe('while'): condition = parse_statement_list(tokens) tokens.consume_only('do') statement = WhileStatement(condition) if tokens.consume_maybe('end'): return statement body = parse_statement_list(tokens) statement.body = body tokens.consume_only('end') return statement if tokens.consume_maybe('if'): return parse_if_body(tokens) token = tokens.consume() return Statement(token)
statement: | 'while' statement_list 'do' statement_list 'end' | 'while' statement_list 'do' 'end' | 'if' if_body | num_literal | string_literal | builtin | identifier
parser/parser.py
parse_statement
JamesWP/anzu
0
python
def parse_statement(tokens): "\n statement:\n | 'while' statement_list 'do' statement_list 'end'\n | 'while' statement_list 'do' 'end'\n | 'if' if_body\n | num_literal\n | string_literal\n | builtin\n | identifier\n " if tokens.consume_maybe('while'): condition = parse_statement_list(tokens) tokens.consume_only('do') statement = WhileStatement(condition) if tokens.consume_maybe('end'): return statement body = parse_statement_list(tokens) statement.body = body tokens.consume_only('end') return statement if tokens.consume_maybe('if'): return parse_if_body(tokens) token = tokens.consume() return Statement(token)
def parse_statement(tokens): "\n statement:\n | 'while' statement_list 'do' statement_list 'end'\n | 'while' statement_list 'do' 'end'\n | 'if' if_body\n | num_literal\n | string_literal\n | builtin\n | identifier\n " if tokens.consume_maybe('while'): condition = parse_statement_list(tokens) tokens.consume_only('do') statement = WhileStatement(condition) if tokens.consume_maybe('end'): return statement body = parse_statement_list(tokens) statement.body = body tokens.consume_only('end') return statement if tokens.consume_maybe('if'): return parse_if_body(tokens) token = tokens.consume() return Statement(token)<|docstring|>statement: | 'while' statement_list 'do' statement_list 'end' | 'while' statement_list 'do' 'end' | 'if' if_body | num_literal | string_literal | builtin | identifier<|endoftext|>
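A while construct, for example, is parsed by collecting the condition statements up to 'do' and then the body statements up to 'end'. Using the TokenStream sketch above, a hypothetical single-condition, single-statement loop:

stmt = parse_statement(TokenStream(['while', 'cond', 'do', 'body_stmt', 'end']))
# stmt is a WhileStatement built from the condition list holding Statement('cond'),
# with stmt.body holding the body list for 'body_stmt'.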
414a33119768ddbcff0a9b776762ce57eb5fd9469b897ad6e03d3578d14d2f8d
def parse_if_body(tokens): "\n if_body:\n | statement_list 'do' statement_list 'elif' if_body\n | statement_list 'do' statement_list 'end'\n " condition = parse_statement_list(tokens) tokens.consume_only('do') body = parse_statement_list(tokens) if_statement = IfStatement(condition, body) if tokens.consume_maybe('elif'): if_statement.else_body = parse_if_body(tokens) return if_statement tokens.consume_only('end') return if_statement
if_body: | statement_list 'do' statement_list 'elif' if_body | statement_list 'do' statement_list 'end'
parser/parser.py
parse_if_body
JamesWP/anzu
0
python
def parse_if_body(tokens): "\n if_body:\n | statement_list 'do' statement_list 'elif' if_body\n | statement_list 'do' statement_list 'end'\n " condition = parse_statement_list(tokens) tokens.consume_only('do') body = parse_statement_list(tokens) if_statement = IfStatement(condition, body) if tokens.consume_maybe('elif'): if_statement.else_body = parse_if_body(tokens) return if_statement tokens.consume_only('end') return if_statement
def parse_if_body(tokens): "\n if_body:\n | statement_list 'do' statement_list 'elif' if_body\n | statement_list 'do' statement_list 'end'\n " condition = parse_statement_list(tokens) tokens.consume_only('do') body = parse_statement_list(tokens) if_statement = IfStatement(condition, body) if tokens.consume_maybe('elif'): if_statement.else_body = parse_if_body(tokens) return if_statement tokens.consume_only('end') return if_statement<|docstring|>if_body: | statement_list 'do' statement_list 'elif' if_body | statement_list 'do' statement_list 'end'<|endoftext|>
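parse_if_body is called after parse_statement has consumed the leading 'if', and an 'elif' simply recurses so the chain nests through else_body. A hypothetical two-branch chain with the TokenStream sketch above:

stmt = parse_if_body(TokenStream(['c1', 'do', 'a', 'elif', 'c2', 'do', 'b', 'end']))
# stmt is the IfStatement for the c1 -> a branch; stmt.else_body is a nested
# IfStatement for the c2 -> b branch, and the recursive call consumed the final 'end'.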