Columns (name: type, min-max):
  body_hash: string, length 64-64
  body: string, length 23-109k
  docstring: string, length 1-57k
  path: string, length 4-198
  name: string, length 1-115
  repository_name: string, length 7-111
  repository_stars: float64, 0-191k
  lang: string, 1 distinct value ("python")
  body_without_docstring: string, length 14-108k
  unified: string, length 45-133k
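For orientation, here is a minimal, hypothetical sketch of iterating records that carry these columns. The file name records.jsonl and the one-JSON-object-per-line layout are assumptions for illustration only, not a description of how this dump was produced.

import json

# Hypothetical: each record is one JSON object per line with the columns listed above.
with open('records.jsonl', encoding='utf-8') as fin:
    for i, line in enumerate(fin):
        row = json.loads(line)
        print(row['body_hash'][:12], row['repository_name'], row['path'])
        print('  name:', row['name'], '| lang:', row['lang'], '| stars:', row['repository_stars'])
        if i >= 2:  # peek at the first three records only
            break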
aef6d6eabf496bd1accceab820d14e6cfc5730125ca3c71eccdb024020dbf9f1
def _save_restore_dic(rsc_dir: str, bin_dic: dict): '\n ์›ํ˜•๋ณต์› ๋ฐ”์ด๋„ˆ๋ฆฌ ์‚ฌ์ „์„ ์ €์žฅํ•œ๋‹ค.\n Args:\n rsc_dir: resource directory\n bin_dic: binary dictionary\n ' os.makedirs(rsc_dir, exist_ok=True) with open('{}/restore.key'.format(rsc_dir), 'wb') as fkey: with open('{}/restore.val'.format(rsc_dir), 'wb') as fval: for (key, vals) in sorted(bin_dic.items()): logging.debug('\t0x%08x => %s', key, ' '.join([('0x%08x' % val) for val in vals])) fkey.write(struct.pack('I', key)) fval.write(struct.pack(('I' * len(vals)), *vals)) logging.info('restore.key: %d', (4 * len(bin_dic))) logging.info('restore.val: %d', (4 * sum([len(vals) for vals in bin_dic.values()])))
์›ํ˜•๋ณต์› ๋ฐ”์ด๋„ˆ๋ฆฌ ์‚ฌ์ „์„ ์ €์žฅํ•œ๋‹ค. Args: rsc_dir: resource directory bin_dic: binary dictionary
rsc/bin/compile_restore.py
_save_restore_dic
juntf/khaiii
1,235
python
def _save_restore_dic(rsc_dir: str, bin_dic: dict): '\n ์›ํ˜•๋ณต์› ๋ฐ”์ด๋„ˆ๋ฆฌ ์‚ฌ์ „์„ ์ €์žฅํ•œ๋‹ค.\n Args:\n rsc_dir: resource directory\n bin_dic: binary dictionary\n ' os.makedirs(rsc_dir, exist_ok=True) with open('{}/restore.key'.format(rsc_dir), 'wb') as fkey: with open('{}/restore.val'.format(rsc_dir), 'wb') as fval: for (key, vals) in sorted(bin_dic.items()): logging.debug('\t0x%08x => %s', key, ' '.join([('0x%08x' % val) for val in vals])) fkey.write(struct.pack('I', key)) fval.write(struct.pack(('I' * len(vals)), *vals)) logging.info('restore.key: %d', (4 * len(bin_dic))) logging.info('restore.val: %d', (4 * sum([len(vals) for vals in bin_dic.values()])))
def _save_restore_dic(rsc_dir: str, bin_dic: dict): '\n ์›ํ˜•๋ณต์› ๋ฐ”์ด๋„ˆ๋ฆฌ ์‚ฌ์ „์„ ์ €์žฅํ•œ๋‹ค.\n Args:\n rsc_dir: resource directory\n bin_dic: binary dictionary\n ' os.makedirs(rsc_dir, exist_ok=True) with open('{}/restore.key'.format(rsc_dir), 'wb') as fkey: with open('{}/restore.val'.format(rsc_dir), 'wb') as fval: for (key, vals) in sorted(bin_dic.items()): logging.debug('\t0x%08x => %s', key, ' '.join([('0x%08x' % val) for val in vals])) fkey.write(struct.pack('I', key)) fval.write(struct.pack(('I' * len(vals)), *vals)) logging.info('restore.key: %d', (4 * len(bin_dic))) logging.info('restore.val: %d', (4 * sum([len(vals) for vals in bin_dic.values()])))<|docstring|>์›ํ˜•๋ณต์› ๋ฐ”์ด๋„ˆ๋ฆฌ ์‚ฌ์ „์„ ์ €์žฅํ•œ๋‹ค. Args: rsc_dir: resource directory bin_dic: binary dictionary<|endoftext|>
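The record above writes restore.key and restore.val as back-to-back struct.pack('I', ...) entries, i.e. native-endian 4-byte unsigned integers. Below is a minimal sketch of reading those files back; the directory share/khaiii is taken from the --rsc-dir default in the main() record further down, and the helper name read_uint32s is hypothetical. Note that restore.val does not itself record how many values belong to each key, so this only enumerates the raw 32-bit entries.

import struct

def read_uint32s(path: str):
    # Read a file of back-to-back native-endian 4-byte unsigned ints,
    # matching how _save_restore_dic() packed them with 'I'.
    with open(path, 'rb') as fin:
        data = fin.read()
    count = len(data) // 4  # 'I' is 4 bytes on typical platforms
    return list(struct.unpack('I' * count, data))

keys = read_uint32s('share/khaiii/restore.key')
vals = read_uint32s('share/khaiii/restore.val')
print(len(keys), 'keys,', len(vals), 'value entries')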
6ad2e8ffc5ea38dc710e8bc2efb2ab81f2ba3e45288edc5bc684b72ac7011714
def _save_restore_one(rsc_dir: str, vocab_out: Dict[(str, int)], vocab_new: Dict[(str, int)]): '\n ์ถœ๋ ฅ ํƒœ๊ทธ ๋ฒˆํ˜ธ ๋ณ„ ์›ํ˜•๋ณต์›์„ ํ•˜์ง€ ์•Š๋Š” ๋น„๋ณต์› ์‚ฌ์ „์„ ์ €์žฅํ•œ๋‹ค.\n Args:\n rsc_dir: resource directory\n vocab_out: ์ถœ๋ ฅ ํƒœ๊ทธ ์‚ฌ์ „\n vocab_new: ์ถœ๋ ฅ ํƒœ๊ทธ ์‚ฌ์ „์— ์ถ”๊ฐ€ํ•  ์ƒˆ๋กœ์šด ํƒœ๊ทธ\n ' idx_tags = sorted([(idx, tag) for (tag, idx) in (list(vocab_out.items()) + list(vocab_new.items()))]) os.makedirs(rsc_dir, exist_ok=True) with open('{}/restore.one'.format(rsc_dir), 'wb') as fone: fone.write(struct.pack('B', 0)) for (idx, out_tag) in idx_tags: one_tag = out_tag.split(':')[0] one_num = TAG_SET[one_tag[2:]] if (one_tag[0] == 'I'): one_num += len(TAG_SET) logging.debug('%d: 0x%02x [%s] %s', idx, one_num, one_tag, out_tag) fone.write(struct.pack('B', one_num)) logging.info('restore.one: %d', (1 + len(idx_tags)))
์ถœ๋ ฅ ํƒœ๊ทธ ๋ฒˆํ˜ธ ๋ณ„ ์›ํ˜•๋ณต์›์„ ํ•˜์ง€ ์•Š๋Š” ๋น„๋ณต์› ์‚ฌ์ „์„ ์ €์žฅํ•œ๋‹ค. Args: rsc_dir: resource directory vocab_out: ์ถœ๋ ฅ ํƒœ๊ทธ ์‚ฌ์ „ vocab_new: ์ถœ๋ ฅ ํƒœ๊ทธ ์‚ฌ์ „์— ์ถ”๊ฐ€ํ•  ์ƒˆ๋กœ์šด ํƒœ๊ทธ
rsc/bin/compile_restore.py
_save_restore_one
juntf/khaiii
1,235
python
def _save_restore_one(rsc_dir: str, vocab_out: Dict[(str, int)], vocab_new: Dict[(str, int)]): '\n ์ถœ๋ ฅ ํƒœ๊ทธ ๋ฒˆํ˜ธ ๋ณ„ ์›ํ˜•๋ณต์›์„ ํ•˜์ง€ ์•Š๋Š” ๋น„๋ณต์› ์‚ฌ์ „์„ ์ €์žฅํ•œ๋‹ค.\n Args:\n rsc_dir: resource directory\n vocab_out: ์ถœ๋ ฅ ํƒœ๊ทธ ์‚ฌ์ „\n vocab_new: ์ถœ๋ ฅ ํƒœ๊ทธ ์‚ฌ์ „์— ์ถ”๊ฐ€ํ•  ์ƒˆ๋กœ์šด ํƒœ๊ทธ\n ' idx_tags = sorted([(idx, tag) for (tag, idx) in (list(vocab_out.items()) + list(vocab_new.items()))]) os.makedirs(rsc_dir, exist_ok=True) with open('{}/restore.one'.format(rsc_dir), 'wb') as fone: fone.write(struct.pack('B', 0)) for (idx, out_tag) in idx_tags: one_tag = out_tag.split(':')[0] one_num = TAG_SET[one_tag[2:]] if (one_tag[0] == 'I'): one_num += len(TAG_SET) logging.debug('%d: 0x%02x [%s] %s', idx, one_num, one_tag, out_tag) fone.write(struct.pack('B', one_num)) logging.info('restore.one: %d', (1 + len(idx_tags)))
def _save_restore_one(rsc_dir: str, vocab_out: Dict[(str, int)], vocab_new: Dict[(str, int)]): '\n ์ถœ๋ ฅ ํƒœ๊ทธ ๋ฒˆํ˜ธ ๋ณ„ ์›ํ˜•๋ณต์›์„ ํ•˜์ง€ ์•Š๋Š” ๋น„๋ณต์› ์‚ฌ์ „์„ ์ €์žฅํ•œ๋‹ค.\n Args:\n rsc_dir: resource directory\n vocab_out: ์ถœ๋ ฅ ํƒœ๊ทธ ์‚ฌ์ „\n vocab_new: ์ถœ๋ ฅ ํƒœ๊ทธ ์‚ฌ์ „์— ์ถ”๊ฐ€ํ•  ์ƒˆ๋กœ์šด ํƒœ๊ทธ\n ' idx_tags = sorted([(idx, tag) for (tag, idx) in (list(vocab_out.items()) + list(vocab_new.items()))]) os.makedirs(rsc_dir, exist_ok=True) with open('{}/restore.one'.format(rsc_dir), 'wb') as fone: fone.write(struct.pack('B', 0)) for (idx, out_tag) in idx_tags: one_tag = out_tag.split(':')[0] one_num = TAG_SET[one_tag[2:]] if (one_tag[0] == 'I'): one_num += len(TAG_SET) logging.debug('%d: 0x%02x [%s] %s', idx, one_num, one_tag, out_tag) fone.write(struct.pack('B', one_num)) logging.info('restore.one: %d', (1 + len(idx_tags)))<|docstring|>์ถœ๋ ฅ ํƒœ๊ทธ ๋ฒˆํ˜ธ ๋ณ„ ์›ํ˜•๋ณต์›์„ ํ•˜์ง€ ์•Š๋Š” ๋น„๋ณต์› ์‚ฌ์ „์„ ์ €์žฅํ•œ๋‹ค. Args: rsc_dir: resource directory vocab_out: ์ถœ๋ ฅ ํƒœ๊ทธ ์‚ฌ์ „ vocab_new: ์ถœ๋ ฅ ํƒœ๊ทธ ์‚ฌ์ „์— ์ถ”๊ฐ€ํ•  ์ƒˆ๋กœ์šด ํƒœ๊ทธ<|endoftext|>
23e62105d8025ef5c5ec43923879c929751c0be95d6b051d4baa756e76f2addd
def run(args: Namespace): '\n run function which is the start point of program\n Args:\n args: program arguments\n ' restore_dic = parse_restore_dic('{}/restore.dic'.format(args.rsc_src)) if (not restore_dic): sys.exit(1) vocab_out = load_vocab_out(args.rsc_src) if (not vocab_out): sys.exit(2) vocab_new = {} bin_dic = _make_bin(restore_dic, vocab_out, vocab_new) _save_restore_dic(args.rsc_dir, bin_dic) _save_restore_one(args.rsc_dir, vocab_out, vocab_new) append_new_entries(args.rsc_src, None, vocab_new)
run function which is the start point of program Args: args: program arguments
rsc/bin/compile_restore.py
run
juntf/khaiii
1,235
python
def run(args: Namespace): '\n run function which is the start point of program\n Args:\n args: program arguments\n ' restore_dic = parse_restore_dic('{}/restore.dic'.format(args.rsc_src)) if (not restore_dic): sys.exit(1) vocab_out = load_vocab_out(args.rsc_src) if (not vocab_out): sys.exit(2) vocab_new = {} bin_dic = _make_bin(restore_dic, vocab_out, vocab_new) _save_restore_dic(args.rsc_dir, bin_dic) _save_restore_one(args.rsc_dir, vocab_out, vocab_new) append_new_entries(args.rsc_src, None, vocab_new)
def run(args: Namespace): '\n run function which is the start point of program\n Args:\n args: program arguments\n ' restore_dic = parse_restore_dic('{}/restore.dic'.format(args.rsc_src)) if (not restore_dic): sys.exit(1) vocab_out = load_vocab_out(args.rsc_src) if (not vocab_out): sys.exit(2) vocab_new = {} bin_dic = _make_bin(restore_dic, vocab_out, vocab_new) _save_restore_dic(args.rsc_dir, bin_dic) _save_restore_one(args.rsc_dir, vocab_out, vocab_new) append_new_entries(args.rsc_src, None, vocab_new)<|docstring|>run function which is the start point of program Args: args: program arguments<|endoftext|>
bee8a5c64c5fdafdc21de3289179f55660c22ef1e017ccb737c33590aabefaf7
def main(): '\n main function processes only argument parsing\n ' parser = ArgumentParser(description='๊ธฐ๋ถ„์„ ์‚ฌ์ „์„ ๋นŒ๋“œํ•˜๋Š” ์Šคํฌ๋ฆฝํŠธ') parser.add_argument('--rsc-src', help='source directory (text) <default: ./src>', metavar='DIR', default='./src') parser.add_argument('--rsc-dir', help='target directory (binary) <default: ./share/khaiii>', metavar='DIR', default='./share/khaiii') parser.add_argument('--debug', help='enable debug', action='store_true') args = parser.parse_args() if args.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) run(args)
main function processes only argument parsing
rsc/bin/compile_restore.py
main
juntf/khaiii
1,235
python
def main(): '\n \n ' parser = ArgumentParser(description='๊ธฐ๋ถ„์„ ์‚ฌ์ „์„ ๋นŒ๋“œํ•˜๋Š” ์Šคํฌ๋ฆฝํŠธ') parser.add_argument('--rsc-src', help='source directory (text) <default: ./src>', metavar='DIR', default='./src') parser.add_argument('--rsc-dir', help='target directory (binary) <default: ./share/khaiii>', metavar='DIR', default='./share/khaiii') parser.add_argument('--debug', help='enable debug', action='store_true') args = parser.parse_args() if args.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) run(args)
def main(): '\n \n ' parser = ArgumentParser(description='๊ธฐ๋ถ„์„ ์‚ฌ์ „์„ ๋นŒ๋“œํ•˜๋Š” ์Šคํฌ๋ฆฝํŠธ') parser.add_argument('--rsc-src', help='source directory (text) <default: ./src>', metavar='DIR', default='./src') parser.add_argument('--rsc-dir', help='target directory (binary) <default: ./share/khaiii>', metavar='DIR', default='./share/khaiii') parser.add_argument('--debug', help='enable debug', action='store_true') args = parser.parse_args() if args.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) run(args)<|docstring|>main function processes only argument parsing<|endoftext|>
dc0a1d3a4f2d95fa4eab3933741df20bdde9cc86d7f4f157f1982a5c8ad372b2
def basic_lti_launch(self, document_url=None, grading_supported=True): 'Do a basic LTI launch with the given document_url.' self.sync_lti_data_to_h() self.store_lti_data() self.context.get_or_create_course() if grading_supported: self.context.js_config.maybe_enable_grading() if (document_url is not None): self.context.js_config.add_document_url(document_url) return {}
Do a basic LTI launch with the given document_url.
lms/views/basic_lti_launch.py
basic_lti_launch
mattdricker/lms
0
python
def basic_lti_launch(self, document_url=None, grading_supported=True): self.sync_lti_data_to_h() self.store_lti_data() self.context.get_or_create_course() if grading_supported: self.context.js_config.maybe_enable_grading() if (document_url is not None): self.context.js_config.add_document_url(document_url) return {}
def basic_lti_launch(self, document_url=None, grading_supported=True): self.sync_lti_data_to_h() self.store_lti_data() self.context.get_or_create_course() if grading_supported: self.context.js_config.maybe_enable_grading() if (document_url is not None): self.context.js_config.add_document_url(document_url) return {}<|docstring|>Do a basic LTI launch with the given document_url.<|endoftext|>
8f944cd6634ed93d87109854ce25707c7e0aa7284896814e2387b68d844374de
def sync_lti_data_to_h(self): '\n Sync LTI data to H.\n\n Before any LTI assignment launch create or update the Hypothesis user\n and group corresponding to the LTI user and course.\n ' self.request.find_service(name='lti_h').sync([self.context.h_group], self.request.params)
Sync LTI data to H. Before any LTI assignment launch create or update the Hypothesis user and group corresponding to the LTI user and course.
lms/views/basic_lti_launch.py
sync_lti_data_to_h
mattdricker/lms
0
python
def sync_lti_data_to_h(self): '\n Sync LTI data to H.\n\n Before any LTI assignment launch create or update the Hypothesis user\n and group corresponding to the LTI user and course.\n ' self.request.find_service(name='lti_h').sync([self.context.h_group], self.request.params)
def sync_lti_data_to_h(self): '\n Sync LTI data to H.\n\n Before any LTI assignment launch create or update the Hypothesis user\n and group corresponding to the LTI user and course.\n ' self.request.find_service(name='lti_h').sync([self.context.h_group], self.request.params)<|docstring|>Sync LTI data to H. Before any LTI assignment launch create or update the Hypothesis user and group corresponding to the LTI user and course.<|endoftext|>
16881c0476bcbd5108a4034aa26ffd8da5a7a4f6162844d1f06ac095d1aee440
def store_lti_data(self): 'Store LTI launch data in our LMS database.' request = self.request LtiLaunches.add(request.db, request.params.get('context_id'), request.params.get('oauth_consumer_key')) lti_user = request.lti_user if ((not lti_user.is_instructor) and (not self.context.is_canvas)): request.find_service(name='grading_info').upsert_from_request(request, h_user=lti_user.h_user, lti_user=lti_user)
Store LTI launch data in our LMS database.
lms/views/basic_lti_launch.py
store_lti_data
mattdricker/lms
0
python
def store_lti_data(self): request = self.request LtiLaunches.add(request.db, request.params.get('context_id'), request.params.get('oauth_consumer_key')) lti_user = request.lti_user if ((not lti_user.is_instructor) and (not self.context.is_canvas)): request.find_service(name='grading_info').upsert_from_request(request, h_user=lti_user.h_user, lti_user=lti_user)
def store_lti_data(self): request = self.request LtiLaunches.add(request.db, request.params.get('context_id'), request.params.get('oauth_consumer_key')) lti_user = request.lti_user if ((not lti_user.is_instructor) and (not self.context.is_canvas)): request.find_service(name='grading_info').upsert_from_request(request, h_user=lti_user.h_user, lti_user=lti_user)<|docstring|>Store LTI launch data in our LMS database.<|endoftext|>
6fd784d3e0faa687c0c5fb1bb272ac6bfd1946161343cab822919581f202a454
@view_config(canvas_file=True) def canvas_file_basic_lti_launch(self): "\n Respond to a Canvas file assignment launch.\n\n Canvas file assignment launch requests have a ``file_id`` request\n parameter, which is the Canvas instance's ID for the file. To display\n the assignment we have to use this ``file_id`` to get a download URL\n for the file from the Canvas API. We then pass that download URL to\n Via. We have to re-do this file-ID-for-download-URL exchange on every\n single launch because Canvas's download URLs are temporary.\n " course_id = self.request.params['custom_canvas_course_id'] file_id = self.request.params['file_id'] resource_link_id = self.request.params['resource_link_id'] self.assignment_service.set_document_url(self.request.params['tool_consumer_instance_guid'], resource_link_id, document_url=f'canvas://file/course/{course_id}/file_id/{file_id}') self.context.js_config.add_canvas_file_id(course_id, resource_link_id, file_id) return self.basic_lti_launch(grading_supported=False)
Respond to a Canvas file assignment launch. Canvas file assignment launch requests have a ``file_id`` request parameter, which is the Canvas instance's ID for the file. To display the assignment we have to use this ``file_id`` to get a download URL for the file from the Canvas API. We then pass that download URL to Via. We have to re-do this file-ID-for-download-URL exchange on every single launch because Canvas's download URLs are temporary.
lms/views/basic_lti_launch.py
canvas_file_basic_lti_launch
mattdricker/lms
0
python
@view_config(canvas_file=True) def canvas_file_basic_lti_launch(self): "\n Respond to a Canvas file assignment launch.\n\n Canvas file assignment launch requests have a ``file_id`` request\n parameter, which is the Canvas instance's ID for the file. To display\n the assignment we have to use this ``file_id`` to get a download URL\n for the file from the Canvas API. We then pass that download URL to\n Via. We have to re-do this file-ID-for-download-URL exchange on every\n single launch because Canvas's download URLs are temporary.\n " course_id = self.request.params['custom_canvas_course_id'] file_id = self.request.params['file_id'] resource_link_id = self.request.params['resource_link_id'] self.assignment_service.set_document_url(self.request.params['tool_consumer_instance_guid'], resource_link_id, document_url=f'canvas://file/course/{course_id}/file_id/{file_id}') self.context.js_config.add_canvas_file_id(course_id, resource_link_id, file_id) return self.basic_lti_launch(grading_supported=False)
@view_config(canvas_file=True) def canvas_file_basic_lti_launch(self): "\n Respond to a Canvas file assignment launch.\n\n Canvas file assignment launch requests have a ``file_id`` request\n parameter, which is the Canvas instance's ID for the file. To display\n the assignment we have to use this ``file_id`` to get a download URL\n for the file from the Canvas API. We then pass that download URL to\n Via. We have to re-do this file-ID-for-download-URL exchange on every\n single launch because Canvas's download URLs are temporary.\n " course_id = self.request.params['custom_canvas_course_id'] file_id = self.request.params['file_id'] resource_link_id = self.request.params['resource_link_id'] self.assignment_service.set_document_url(self.request.params['tool_consumer_instance_guid'], resource_link_id, document_url=f'canvas://file/course/{course_id}/file_id/{file_id}') self.context.js_config.add_canvas_file_id(course_id, resource_link_id, file_id) return self.basic_lti_launch(grading_supported=False)<|docstring|>Respond to a Canvas file assignment launch. Canvas file assignment launch requests have a ``file_id`` request parameter, which is the Canvas instance's ID for the file. To display the assignment we have to use this ``file_id`` to get a download URL for the file from the Canvas API. We then pass that download URL to Via. We have to re-do this file-ID-for-download-URL exchange on every single launch because Canvas's download URLs are temporary.<|endoftext|>
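As the record above shows, Canvas file assignments are stored under a synthetic document URL of the form canvas://file/course/<course_id>/file_id/<file_id>. A small, hypothetical helper for splitting that URL back apart (the function name and the example IDs are illustrative, not part of the lms codebase):

def parse_canvas_file_url(document_url: str):
    # Split 'canvas://file/course/<course_id>/file_id/<file_id>' into its two IDs.
    prefix = 'canvas://file/course/'
    if not document_url.startswith(prefix):
        raise ValueError('not a canvas file document URL')
    course_id, _, file_id = document_url[len(prefix):].partition('/file_id/')
    return course_id, file_id

print(parse_canvas_file_url('canvas://file/course/125/file_id/100'))  # ('125', '100')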
3a0d69665b9c607cd1a9dbf7462c8d1bbd47bca2f33e7aad17bee30b86b7f797
@view_config(vitalsource_book=True) def vitalsource_lti_launch(self): "\n Respond to a VitalSource book launch.\n\n The book and chapter to show are configured by `book_id` and `cfi` request\n parameters. VitalSource book launches involve a second LTI launch.\n Hypothesis's LMS app generates the form parameters needed to load\n VitalSource's book viewer using an LTI launch. The LMS frontend then\n renders these parameters into a form and auto-submits the form to perform\n an authenticated launch of the VS book viewer.\n " self.sync_lti_data_to_h() self.store_lti_data() self.context.js_config.maybe_enable_grading() self.context.js_config.add_vitalsource_launch_config(self.request.params['book_id'], self.request.params.get('cfi')) return {}
Respond to a VitalSource book launch. The book and chapter to show are configured by `book_id` and `cfi` request parameters. VitalSource book launches involve a second LTI launch. Hypothesis's LMS app generates the form parameters needed to load VitalSource's book viewer using an LTI launch. The LMS frontend then renders these parameters into a form and auto-submits the form to perform an authenticated launch of the VS book viewer.
lms/views/basic_lti_launch.py
vitalsource_lti_launch
mattdricker/lms
0
python
@view_config(vitalsource_book=True) def vitalsource_lti_launch(self): "\n Respond to a VitalSource book launch.\n\n The book and chapter to show are configured by `book_id` and `cfi` request\n parameters. VitalSource book launches involve a second LTI launch.\n Hypothesis's LMS app generates the form parameters needed to load\n VitalSource's book viewer using an LTI launch. The LMS frontend then\n renders these parameters into a form and auto-submits the form to perform\n an authenticated launch of the VS book viewer.\n " self.sync_lti_data_to_h() self.store_lti_data() self.context.js_config.maybe_enable_grading() self.context.js_config.add_vitalsource_launch_config(self.request.params['book_id'], self.request.params.get('cfi')) return {}
@view_config(vitalsource_book=True) def vitalsource_lti_launch(self): "\n Respond to a VitalSource book launch.\n\n The book and chapter to show are configured by `book_id` and `cfi` request\n parameters. VitalSource book launches involve a second LTI launch.\n Hypothesis's LMS app generates the form parameters needed to load\n VitalSource's book viewer using an LTI launch. The LMS frontend then\n renders these parameters into a form and auto-submits the form to perform\n an authenticated launch of the VS book viewer.\n " self.sync_lti_data_to_h() self.store_lti_data() self.context.js_config.maybe_enable_grading() self.context.js_config.add_vitalsource_launch_config(self.request.params['book_id'], self.request.params.get('cfi')) return {}<|docstring|>Respond to a VitalSource book launch. The book and chapter to show are configured by `book_id` and `cfi` request parameters. VitalSource book launches involve a second LTI launch. Hypothesis's LMS app generates the form parameters needed to load VitalSource's book viewer using an LTI launch. The LMS frontend then renders these parameters into a form and auto-submits the form to perform an authenticated launch of the VS book viewer.<|endoftext|>
e2176da8985e513fce7ef66310e38725463f31b80d5287b455842455c4045218
@view_config(db_configured=True) def db_configured_basic_lti_launch(self): "\n Respond to a DB-configured assignment launch.\n\n DB-configured assignment launch requests don't have any kind of file ID\n or document URL in the request. Instead the document URL is stored in\n our own DB. This happens with LMS's that don't support LTI content item\n selection/deep linking, so they don't support storing the document URL\n in the LMS and passing it back to us in each launch request. Instead we\n retrieve the document URL from the DB and pass it to Via.\n " tool_consumer_instance_guid = self.request.params['tool_consumer_instance_guid'] resource_link_id = self.request.params['resource_link_id'] document_url = self.assignment_service.get_document_url(tool_consumer_instance_guid, resource_link_id) return self.basic_lti_launch(document_url)
Respond to a DB-configured assignment launch. DB-configured assignment launch requests don't have any kind of file ID or document URL in the request. Instead the document URL is stored in our own DB. This happens with LMS's that don't support LTI content item selection/deep linking, so they don't support storing the document URL in the LMS and passing it back to us in each launch request. Instead we retrieve the document URL from the DB and pass it to Via.
lms/views/basic_lti_launch.py
db_configured_basic_lti_launch
mattdricker/lms
0
python
@view_config(db_configured=True) def db_configured_basic_lti_launch(self): "\n Respond to a DB-configured assignment launch.\n\n DB-configured assignment launch requests don't have any kind of file ID\n or document URL in the request. Instead the document URL is stored in\n our own DB. This happens with LMS's that don't support LTI content item\n selection/deep linking, so they don't support storing the document URL\n in the LMS and passing it back to us in each launch request. Instead we\n retrieve the document URL from the DB and pass it to Via.\n " tool_consumer_instance_guid = self.request.params['tool_consumer_instance_guid'] resource_link_id = self.request.params['resource_link_id'] document_url = self.assignment_service.get_document_url(tool_consumer_instance_guid, resource_link_id) return self.basic_lti_launch(document_url)
@view_config(db_configured=True) def db_configured_basic_lti_launch(self): "\n Respond to a DB-configured assignment launch.\n\n DB-configured assignment launch requests don't have any kind of file ID\n or document URL in the request. Instead the document URL is stored in\n our own DB. This happens with LMS's that don't support LTI content item\n selection/deep linking, so they don't support storing the document URL\n in the LMS and passing it back to us in each launch request. Instead we\n retrieve the document URL from the DB and pass it to Via.\n " tool_consumer_instance_guid = self.request.params['tool_consumer_instance_guid'] resource_link_id = self.request.params['resource_link_id'] document_url = self.assignment_service.get_document_url(tool_consumer_instance_guid, resource_link_id) return self.basic_lti_launch(document_url)<|docstring|>Respond to a DB-configured assignment launch. DB-configured assignment launch requests don't have any kind of file ID or document URL in the request. Instead the document URL is stored in our own DB. This happens with LMS's that don't support LTI content item selection/deep linking, so they don't support storing the document URL in the LMS and passing it back to us in each launch request. Instead we retrieve the document URL from the DB and pass it to Via.<|endoftext|>
a7bf30954503dc792d42b258be3e134b4dbffa173167f8f27a330626552f4ce3
@view_config(blackboard_copied=True) def blackboard_copied_basic_lti_launch(self): "\n Respond to a launch of a newly-copied Blackboard assignment.\n\n For more about Blackboard course copy see the BlackboardCopied\n predicate's docstring.\n " return self.course_copied_basic_lti_launch(BlackboardCopied.get_original_resource_link_id(self.request))
Respond to a launch of a newly-copied Blackboard assignment. For more about Blackboard course copy see the BlackboardCopied predicate's docstring.
lms/views/basic_lti_launch.py
blackboard_copied_basic_lti_launch
mattdricker/lms
0
python
@view_config(blackboard_copied=True) def blackboard_copied_basic_lti_launch(self): "\n Respond to a launch of a newly-copied Blackboard assignment.\n\n For more about Blackboard course copy see the BlackboardCopied\n predicate's docstring.\n " return self.course_copied_basic_lti_launch(BlackboardCopied.get_original_resource_link_id(self.request))
@view_config(blackboard_copied=True) def blackboard_copied_basic_lti_launch(self): "\n Respond to a launch of a newly-copied Blackboard assignment.\n\n For more about Blackboard course copy see the BlackboardCopied\n predicate's docstring.\n " return self.course_copied_basic_lti_launch(BlackboardCopied.get_original_resource_link_id(self.request))<|docstring|>Respond to a launch of a newly-copied Blackboard assignment. For more about Blackboard course copy see the BlackboardCopied predicate's docstring.<|endoftext|>
9d821a1966d103dc881b92a40919e3d08c7a64387f29d26658c86c311e2ab0b0
@view_config(brightspace_copied=True) def brightspace_copied_basic_lti_launch(self): "\n Respond to a launch of a newly-copied Brightspace assignment.\n\n For more about Brightspace course copy see the BrightspaceCopied\n predicate's docstring.\n " return self.course_copied_basic_lti_launch(BrightspaceCopied.get_original_resource_link_id(self.request))
Respond to a launch of a newly-copied Brightspace assignment. For more about Brightspace course copy see the BrightspaceCopied predicate's docstring.
lms/views/basic_lti_launch.py
brightspace_copied_basic_lti_launch
mattdricker/lms
0
python
@view_config(brightspace_copied=True) def brightspace_copied_basic_lti_launch(self): "\n Respond to a launch of a newly-copied Brightspace assignment.\n\n For more about Brightspace course copy see the BrightspaceCopied\n predicate's docstring.\n " return self.course_copied_basic_lti_launch(BrightspaceCopied.get_original_resource_link_id(self.request))
@view_config(brightspace_copied=True) def brightspace_copied_basic_lti_launch(self): "\n Respond to a launch of a newly-copied Brightspace assignment.\n\n For more about Brightspace course copy see the BrightspaceCopied\n predicate's docstring.\n " return self.course_copied_basic_lti_launch(BrightspaceCopied.get_original_resource_link_id(self.request))<|docstring|>Respond to a launch of a newly-copied Brightspace assignment. For more about Brightspace course copy see the BrightspaceCopied predicate's docstring.<|endoftext|>
726321c03b0f9f5944906be1d2650e54c958f3e2dd95e7ce6c93bdca1de9a2cb
def course_copied_basic_lti_launch(self, original_resource_link_id): '\n Respond to a launch of a newly-copied assignment.\n\n Find the document_url for the original assignment and make a copy of it\n with the new resource_link_id, then launch the assignment as normal.\n\n Helper method for the *_copied_basic_lti_launch() methods above.\n\n :param original_resource_link_id: the resource_link_id of the original\n assignment that this assignment was copied from\n ' tool_consumer_instance_guid = self.request.params['tool_consumer_instance_guid'] resource_link_id = self.request.params['resource_link_id'] document_url = self.assignment_service.get_document_url(tool_consumer_instance_guid, original_resource_link_id) self.assignment_service.set_document_url(tool_consumer_instance_guid, resource_link_id, document_url) return self.basic_lti_launch(document_url)
Respond to a launch of a newly-copied assignment. Find the document_url for the original assignment and make a copy of it with the new resource_link_id, then launch the assignment as normal. Helper method for the *_copied_basic_lti_launch() methods above. :param original_resource_link_id: the resource_link_id of the original assignment that this assignment was copied from
lms/views/basic_lti_launch.py
course_copied_basic_lti_launch
mattdricker/lms
0
python
def course_copied_basic_lti_launch(self, original_resource_link_id): '\n Respond to a launch of a newly-copied assignment.\n\n Find the document_url for the original assignment and make a copy of it\n with the new resource_link_id, then launch the assignment as normal.\n\n Helper method for the *_copied_basic_lti_launch() methods above.\n\n :param original_resource_link_id: the resource_link_id of the original\n assignment that this assignment was copied from\n ' tool_consumer_instance_guid = self.request.params['tool_consumer_instance_guid'] resource_link_id = self.request.params['resource_link_id'] document_url = self.assignment_service.get_document_url(tool_consumer_instance_guid, original_resource_link_id) self.assignment_service.set_document_url(tool_consumer_instance_guid, resource_link_id, document_url) return self.basic_lti_launch(document_url)
def course_copied_basic_lti_launch(self, original_resource_link_id): '\n Respond to a launch of a newly-copied assignment.\n\n Find the document_url for the original assignment and make a copy of it\n with the new resource_link_id, then launch the assignment as normal.\n\n Helper method for the *_copied_basic_lti_launch() methods above.\n\n :param original_resource_link_id: the resource_link_id of the original\n assignment that this assignment was copied from\n ' tool_consumer_instance_guid = self.request.params['tool_consumer_instance_guid'] resource_link_id = self.request.params['resource_link_id'] document_url = self.assignment_service.get_document_url(tool_consumer_instance_guid, original_resource_link_id) self.assignment_service.set_document_url(tool_consumer_instance_guid, resource_link_id, document_url) return self.basic_lti_launch(document_url)<|docstring|>Respond to a launch of a newly-copied assignment. Find the document_url for the original assignment and make a copy of it with the new resource_link_id, then launch the assignment as normal. Helper method for the *_copied_basic_lti_launch() methods above. :param original_resource_link_id: the resource_link_id of the original assignment that this assignment was copied from<|endoftext|>
a2cf2577a225e9210187c9b2754b47c709e9775772b8c79ad42bcef12614025e
@view_config(url_configured=True, schema=URLConfiguredBasicLTILaunchSchema) def url_configured_basic_lti_launch(self): "\n Respond to a URL-configured assignment launch.\n\n URL-configured assignment launch requests have the document URL in the\n ``url`` request parameter. This happens in LMS's that support LTI\n content item selection/deep linking: the document URL is chosen during\n content item selection (during assignment creation) and saved in the\n LMS, which passes it back to us in each launch request. All we have to\n do is pass the URL to Via.\n " return self.basic_lti_launch(self.request.parsed_params['url'])
Respond to a URL-configured assignment launch. URL-configured assignment launch requests have the document URL in the ``url`` request parameter. This happens in LMS's that support LTI content item selection/deep linking: the document URL is chosen during content item selection (during assignment creation) and saved in the LMS, which passes it back to us in each launch request. All we have to do is pass the URL to Via.
lms/views/basic_lti_launch.py
url_configured_basic_lti_launch
mattdricker/lms
0
python
@view_config(url_configured=True, schema=URLConfiguredBasicLTILaunchSchema) def url_configured_basic_lti_launch(self): "\n Respond to a URL-configured assignment launch.\n\n URL-configured assignment launch requests have the document URL in the\n ``url`` request parameter. This happens in LMS's that support LTI\n content item selection/deep linking: the document URL is chosen during\n content item selection (during assignment creation) and saved in the\n LMS, which passes it back to us in each launch request. All we have to\n do is pass the URL to Via.\n " return self.basic_lti_launch(self.request.parsed_params['url'])
@view_config(url_configured=True, schema=URLConfiguredBasicLTILaunchSchema) def url_configured_basic_lti_launch(self): "\n Respond to a URL-configured assignment launch.\n\n URL-configured assignment launch requests have the document URL in the\n ``url`` request parameter. This happens in LMS's that support LTI\n content item selection/deep linking: the document URL is chosen during\n content item selection (during assignment creation) and saved in the\n LMS, which passes it back to us in each launch request. All we have to\n do is pass the URL to Via.\n " return self.basic_lti_launch(self.request.parsed_params['url'])<|docstring|>Respond to a URL-configured assignment launch. URL-configured assignment launch requests have the document URL in the ``url`` request parameter. This happens in LMS's that support LTI content item selection/deep linking: the document URL is chosen during content item selection (during assignment creation) and saved in the LMS, which passes it back to us in each launch request. All we have to do is pass the URL to Via.<|endoftext|>
8f81b1b8231860d4da7d2d4a918d9afcc21b2268e47e2e8d6a89395048623356
@view_config(authorized_to_configure_assignments=True, configured=False, renderer='lms:templates/file_picker.html.jinja2') def unconfigured_basic_lti_launch(self): "\n Respond to an unconfigured assignment launch.\n\n Unconfigured assignment launch requests don't contain any document URL\n or file ID because the assignment's document hasn't been chosen yet.\n This happens in LMS's that don't support LTI content item\n selection/deep linking. They go straight from assignment creation to\n launching the assignment without the user having had a chance to choose\n a document.\n\n When this happens we show the user our document-selection form instead\n of launching the assignment. The user will choose the document and\n we'll save it in our DB. Subsequent launches of the same assignment\n will then be DB-configured launches rather than unconfigured.\n " self.context.get_or_create_course() form_fields = {param: value for (param, value) in self.request.params.items() if (param not in ['oauth_nonce', 'oauth_timestamp', 'oauth_signature'])} form_fields['authorization'] = BearerTokenSchema(self.request).authorization_param(self.request.lti_user) self.context.js_config.enable_content_item_selection_mode(form_action=self.request.route_url('configure_assignment'), form_fields=form_fields) return {}
Respond to an unconfigured assignment launch. Unconfigured assignment launch requests don't contain any document URL or file ID because the assignment's document hasn't been chosen yet. This happens in LMS's that don't support LTI content item selection/deep linking. They go straight from assignment creation to launching the assignment without the user having had a chance to choose a document. When this happens we show the user our document-selection form instead of launching the assignment. The user will choose the document and we'll save it in our DB. Subsequent launches of the same assignment will then be DB-configured launches rather than unconfigured.
lms/views/basic_lti_launch.py
unconfigured_basic_lti_launch
mattdricker/lms
0
python
@view_config(authorized_to_configure_assignments=True, configured=False, renderer='lms:templates/file_picker.html.jinja2') def unconfigured_basic_lti_launch(self): "\n Respond to an unconfigured assignment launch.\n\n Unconfigured assignment launch requests don't contain any document URL\n or file ID because the assignment's document hasn't been chosen yet.\n This happens in LMS's that don't support LTI content item\n selection/deep linking. They go straight from assignment creation to\n launching the assignment without the user having had a chance to choose\n a document.\n\n When this happens we show the user our document-selection form instead\n of launching the assignment. The user will choose the document and\n we'll save it in our DB. Subsequent launches of the same assignment\n will then be DB-configured launches rather than unconfigured.\n " self.context.get_or_create_course() form_fields = {param: value for (param, value) in self.request.params.items() if (param not in ['oauth_nonce', 'oauth_timestamp', 'oauth_signature'])} form_fields['authorization'] = BearerTokenSchema(self.request).authorization_param(self.request.lti_user) self.context.js_config.enable_content_item_selection_mode(form_action=self.request.route_url('configure_assignment'), form_fields=form_fields) return {}
@view_config(authorized_to_configure_assignments=True, configured=False, renderer='lms:templates/file_picker.html.jinja2') def unconfigured_basic_lti_launch(self): "\n Respond to an unconfigured assignment launch.\n\n Unconfigured assignment launch requests don't contain any document URL\n or file ID because the assignment's document hasn't been chosen yet.\n This happens in LMS's that don't support LTI content item\n selection/deep linking. They go straight from assignment creation to\n launching the assignment without the user having had a chance to choose\n a document.\n\n When this happens we show the user our document-selection form instead\n of launching the assignment. The user will choose the document and\n we'll save it in our DB. Subsequent launches of the same assignment\n will then be DB-configured launches rather than unconfigured.\n " self.context.get_or_create_course() form_fields = {param: value for (param, value) in self.request.params.items() if (param not in ['oauth_nonce', 'oauth_timestamp', 'oauth_signature'])} form_fields['authorization'] = BearerTokenSchema(self.request).authorization_param(self.request.lti_user) self.context.js_config.enable_content_item_selection_mode(form_action=self.request.route_url('configure_assignment'), form_fields=form_fields) return {}<|docstring|>Respond to an unconfigured assignment launch. Unconfigured assignment launch requests don't contain any document URL or file ID because the assignment's document hasn't been chosen yet. This happens in LMS's that don't support LTI content item selection/deep linking. They go straight from assignment creation to launching the assignment without the user having had a chance to choose a document. When this happens we show the user our document-selection form instead of launching the assignment. The user will choose the document and we'll save it in our DB. Subsequent launches of the same assignment will then be DB-configured launches rather than unconfigured.<|endoftext|>
c21f01283dcea7985788fb36bfdddb5d304d9a67018798bfd097cd1c0237839a
@view_config(authorized_to_configure_assignments=False, configured=False, renderer='lms:templates/basic_lti_launch/unconfigured_basic_lti_launch_not_authorized.html.jinja2') def unconfigured_basic_lti_launch_not_authorized(self): "\n Respond to an unauthorized unconfigured assignment launch.\n\n This happens when an assignment's document hasn't been chosen yet and\n the assignment is launched by a user who isn't authorized to choose the\n document (for example a learner rather than a teacher). We just show an\n error page.\n " return {}
Respond to an unauthorized unconfigured assignment launch. This happens when an assignment's document hasn't been chosen yet and the assignment is launched by a user who isn't authorized to choose the document (for example a learner rather than a teacher). We just show an error page.
lms/views/basic_lti_launch.py
unconfigured_basic_lti_launch_not_authorized
mattdricker/lms
0
python
@view_config(authorized_to_configure_assignments=False, configured=False, renderer='lms:templates/basic_lti_launch/unconfigured_basic_lti_launch_not_authorized.html.jinja2') def unconfigured_basic_lti_launch_not_authorized(self): "\n Respond to an unauthorized unconfigured assignment launch.\n\n This happens when an assignment's document hasn't been chosen yet and\n the assignment is launched by a user who isn't authorized to choose the\n document (for example a learner rather than a teacher). We just show an\n error page.\n " return {}
@view_config(authorized_to_configure_assignments=False, configured=False, renderer='lms:templates/basic_lti_launch/unconfigured_basic_lti_launch_not_authorized.html.jinja2') def unconfigured_basic_lti_launch_not_authorized(self): "\n Respond to an unauthorized unconfigured assignment launch.\n\n This happens when an assignment's document hasn't been chosen yet and\n the assignment is launched by a user who isn't authorized to choose the\n document (for example a learner rather than a teacher). We just show an\n error page.\n " return {}<|docstring|>Respond to an unauthorized unconfigured assignment launch. This happens when an assignment's document hasn't been chosen yet and the assignment is launched by a user who isn't authorized to choose the document (for example a learner rather than a teacher). We just show an error page.<|endoftext|>
6dc14c07fd19c75ef02a1244a729c4c7597dafc589c57fb78f6388900dfd2b6e
@view_config(authorized_to_configure_assignments=True, route_name='configure_assignment', schema=ConfigureAssignmentSchema) def configure_assignment(self): '\n Respond to a configure module item request.\n\n This happens after an unconfigured assignment launch. We show the user\n our document selection form instead of launching the assignment, and\n when the user chooses a document and submits the form this is the view\n that receives that form submission.\n\n We save the chosen document in the DB so that subsequent launches of\n this same assignment will be DB-configured rather than unconfigured.\n And we also send back the assignment launch page, passing the chosen\n URL to Via, as the direct response to the content item form submission.\n ' document_url = self.request.parsed_params['document_url'] self.assignment_service.set_document_url(self.request.parsed_params['tool_consumer_instance_guid'], self.request.parsed_params['resource_link_id'], document_url) self.context.js_config.add_document_url(document_url) self.sync_lti_data_to_h() self.store_lti_data() self.context.js_config.maybe_enable_grading() return {}
Respond to a configure module item request. This happens after an unconfigured assignment launch. We show the user our document selection form instead of launching the assignment, and when the user chooses a document and submits the form this is the view that receives that form submission. We save the chosen document in the DB so that subsequent launches of this same assignment will be DB-configured rather than unconfigured. And we also send back the assignment launch page, passing the chosen URL to Via, as the direct response to the content item form submission.
lms/views/basic_lti_launch.py
configure_assignment
mattdricker/lms
0
python
@view_config(authorized_to_configure_assignments=True, route_name='configure_assignment', schema=ConfigureAssignmentSchema) def configure_assignment(self): '\n Respond to a configure module item request.\n\n This happens after an unconfigured assignment launch. We show the user\n our document selection form instead of launching the assignment, and\n when the user chooses a document and submits the form this is the view\n that receives that form submission.\n\n We save the chosen document in the DB so that subsequent launches of\n this same assignment will be DB-configured rather than unconfigured.\n And we also send back the assignment launch page, passing the chosen\n URL to Via, as the direct response to the content item form submission.\n ' document_url = self.request.parsed_params['document_url'] self.assignment_service.set_document_url(self.request.parsed_params['tool_consumer_instance_guid'], self.request.parsed_params['resource_link_id'], document_url) self.context.js_config.add_document_url(document_url) self.sync_lti_data_to_h() self.store_lti_data() self.context.js_config.maybe_enable_grading() return {}
@view_config(authorized_to_configure_assignments=True, route_name='configure_assignment', schema=ConfigureAssignmentSchema) def configure_assignment(self): '\n Respond to a configure module item request.\n\n This happens after an unconfigured assignment launch. We show the user\n our document selection form instead of launching the assignment, and\n when the user chooses a document and submits the form this is the view\n that receives that form submission.\n\n We save the chosen document in the DB so that subsequent launches of\n this same assignment will be DB-configured rather than unconfigured.\n And we also send back the assignment launch page, passing the chosen\n URL to Via, as the direct response to the content item form submission.\n ' document_url = self.request.parsed_params['document_url'] self.assignment_service.set_document_url(self.request.parsed_params['tool_consumer_instance_guid'], self.request.parsed_params['resource_link_id'], document_url) self.context.js_config.add_document_url(document_url) self.sync_lti_data_to_h() self.store_lti_data() self.context.js_config.maybe_enable_grading() return {}<|docstring|>Respond to a configure module item request. This happens after an unconfigured assignment launch. We show the user our document selection form instead of launching the assignment, and when the user chooses a document and submits the form this is the view that receives that form submission. We save the chosen document in the DB so that subsequent launches of this same assignment will be DB-configured rather than unconfigured. And we also send back the assignment launch page, passing the chosen URL to Via, as the direct response to the content item form submission.<|endoftext|>
b3e5990fbdcb2eb8ac996cf8f3e6bc911b0cae6f30fffcd02d15bf3d7d49409d
def test_assertfailedwrite(): '\n\tIO Test - made to fail with Wrong Output file path\n\t' np.random.seed(0) random_data = (np.random.random(size=(10, 1080, 1920, 3)) * 255) input_data = random_data.astype(np.uint8) with pytest.raises(AssertionError): writer = WriteGear('wrong_path/output.mp4') writer.write(input_data) writer.close()
IO Test - made to fail with Wrong Output file path
vidgear/tests/writer_tests/test_IO.py
test_assertfailedwrite
winnerineast/vidgear
1
python
def test_assertfailedwrite(): '\n\t\n\t' np.random.seed(0) random_data = (np.random.random(size=(10, 1080, 1920, 3)) * 255) input_data = random_data.astype(np.uint8) with pytest.raises(AssertionError): writer = WriteGear('wrong_path/output.mp4') writer.write(input_data) writer.close()
def test_assertfailedwrite(): '\n\t\n\t' np.random.seed(0) random_data = (np.random.random(size=(10, 1080, 1920, 3)) * 255) input_data = random_data.astype(np.uint8) with pytest.raises(AssertionError): writer = WriteGear('wrong_path/output.mp4') writer.write(input_data) writer.close()<|docstring|>IO Test - made to fail with Wrong Output file path<|endoftext|>
2fea8bcfd1a1a5beef187bcd4a7273514208180c5c24a091ed62f04476458825
def test_failedextension(): '\n\tIO Test - made to fail with filename with wrong extention\n\t' np.random.seed(0) random_data = (np.random.random(size=(10, 1080, 1920, 3)) * 255) input_data = random_data.astype(np.uint8) with pytest.raises(ValueError): writer = WriteGear('garbage.garbage') writer.write(input_data) writer.close()
IO Test - made to fail with filename with wrong extention
vidgear/tests/writer_tests/test_IO.py
test_failedextension
winnerineast/vidgear
1
python
def test_failedextension(): '\n\t\n\t' np.random.seed(0) random_data = (np.random.random(size=(10, 1080, 1920, 3)) * 255) input_data = random_data.astype(np.uint8) with pytest.raises(ValueError): writer = WriteGear('garbage.garbage') writer.write(input_data) writer.close()
def test_failedextension(): '\n\t\n\t' np.random.seed(0) random_data = (np.random.random(size=(10, 1080, 1920, 3)) * 255) input_data = random_data.astype(np.uint8) with pytest.raises(ValueError): writer = WriteGear('garbage.garbage') writer.write(input_data) writer.close()<|docstring|>IO Test - made to fail with filename with wrong extention<|endoftext|>
93281dd4a23eaf2158739c45bf6e6cc5573ebeeb1b9aad548d2d0c90b4d8f377
def __init__(self, name=None, physical_name=None, tag=None, _configuration=None): 'AgentThingProperties - a model defined in Swagger' if (_configuration is None): _configuration = Configuration() self._configuration = _configuration self._name = None self._physical_name = None self._tag = None self.discriminator = None if (name is not None): self.name = name if (physical_name is not None): self.physical_name = physical_name if (tag is not None): self.tag = tag
AgentThingProperties - a model defined in Swagger
ctm_api_client/models/agent_thing_properties.py
__init__
tadinve/ctm_python_client
0
python
def __init__(self, name=None, physical_name=None, tag=None, _configuration=None): if (_configuration is None): _configuration = Configuration() self._configuration = _configuration self._name = None self._physical_name = None self._tag = None self.discriminator = None if (name is not None): self.name = name if (physical_name is not None): self.physical_name = physical_name if (tag is not None): self.tag = tag
def __init__(self, name=None, physical_name=None, tag=None, _configuration=None): if (_configuration is None): _configuration = Configuration() self._configuration = _configuration self._name = None self._physical_name = None self._tag = None self.discriminator = None if (name is not None): self.name = name if (physical_name is not None): self.physical_name = physical_name if (tag is not None): self.tag = tag<|docstring|>AgentThingProperties - a model defined in Swagger<|endoftext|>
39018a111492d28ac0c852d50ba6e4e6f1f60dd893ae87ca4cdac275bcf14195
@property def name(self): 'Gets the name of this AgentThingProperties. # noqa: E501\n\n the logical name to be used for new agent (equivilant to NodeId) # noqa: E501\n\n :return: The name of this AgentThingProperties. # noqa: E501\n :rtype: str\n ' return self._name
Gets the name of this AgentThingProperties. # noqa: E501 the logical name to be used for new agent (equivilant to NodeId) # noqa: E501 :return: The name of this AgentThingProperties. # noqa: E501 :rtype: str
ctm_api_client/models/agent_thing_properties.py
name
tadinve/ctm_python_client
0
python
@property def name(self): 'Gets the name of this AgentThingProperties. # noqa: E501\n\n the logical name to be used for new agent (equivilant to NodeId) # noqa: E501\n\n :return: The name of this AgentThingProperties. # noqa: E501\n :rtype: str\n ' return self._name
@property def name(self): 'Gets the name of this AgentThingProperties. # noqa: E501\n\n the logical name to be used for new agent (equivilant to NodeId) # noqa: E501\n\n :return: The name of this AgentThingProperties. # noqa: E501\n :rtype: str\n ' return self._name<|docstring|>Gets the name of this AgentThingProperties. # noqa: E501 the logical name to be used for new agent (equivilant to NodeId) # noqa: E501 :return: The name of this AgentThingProperties. # noqa: E501 :rtype: str<|endoftext|>
6b6bbd6d8e440b05714b560cd3f9d4031140f5aa4f067e0952a03ffef2718d08
@name.setter def name(self, name): 'Sets the name of this AgentThingProperties.\n\n the logical name to be used for new agent (equivilant to NodeId) # noqa: E501\n\n :param name: The name of this AgentThingProperties. # noqa: E501\n :type: str\n ' self._name = name
Sets the name of this AgentThingProperties. the logical name to be used for new agent (equivilant to NodeId) # noqa: E501 :param name: The name of this AgentThingProperties. # noqa: E501 :type: str
ctm_api_client/models/agent_thing_properties.py
name
tadinve/ctm_python_client
0
python
@name.setter def name(self, name): 'Sets the name of this AgentThingProperties.\n\n the logical name to be used for new agent (equivilant to NodeId) # noqa: E501\n\n :param name: The name of this AgentThingProperties. # noqa: E501\n :type: str\n ' self._name = name
@name.setter def name(self, name): 'Sets the name of this AgentThingProperties.\n\n the logical name to be used for new agent (equivilant to NodeId) # noqa: E501\n\n :param name: The name of this AgentThingProperties. # noqa: E501\n :type: str\n ' self._name = name<|docstring|>Sets the name of this AgentThingProperties. the logical name to be used for new agent (equivilant to NodeId) # noqa: E501 :param name: The name of this AgentThingProperties. # noqa: E501 :type: str<|endoftext|>
3d9b35cf23e5db685971c68756dfb30f83afa47f448b33f28e143c9362d54a0a
@property def physical_name(self): 'Gets the physical_name of this AgentThingProperties. # noqa: E501\n\n the physical name of the thing the agent is installed on # noqa: E501\n\n :return: The physical_name of this AgentThingProperties. # noqa: E501\n :rtype: str\n ' return self._physical_name
Gets the physical_name of this AgentThingProperties. # noqa: E501 the physical name of the thing the agent is installed on # noqa: E501 :return: The physical_name of this AgentThingProperties. # noqa: E501 :rtype: str
ctm_api_client/models/agent_thing_properties.py
physical_name
tadinve/ctm_python_client
0
python
@property def physical_name(self): 'Gets the physical_name of this AgentThingProperties. # noqa: E501\n\n the physical name of the thing the agent is installed on # noqa: E501\n\n :return: The physical_name of this AgentThingProperties. # noqa: E501\n :rtype: str\n ' return self._physical_name
@property def physical_name(self): 'Gets the physical_name of this AgentThingProperties. # noqa: E501\n\n the physical name of the thing the agent is installed on # noqa: E501\n\n :return: The physical_name of this AgentThingProperties. # noqa: E501\n :rtype: str\n ' return self._physical_name<|docstring|>Gets the physical_name of this AgentThingProperties. # noqa: E501 the physical name of the thing the agent is installed on # noqa: E501 :return: The physical_name of this AgentThingProperties. # noqa: E501 :rtype: str<|endoftext|>
68a0ddaba0e9539c21c66c7ef9e9d0c053d18b05071e1b3b0bb1be8b20c909d2
@physical_name.setter def physical_name(self, physical_name): 'Sets the physical_name of this AgentThingProperties.\n\n the physical name of the thing the agent is installed on # noqa: E501\n\n :param physical_name: The physical_name of this AgentThingProperties. # noqa: E501\n :type: str\n ' self._physical_name = physical_name
Sets the physical_name of this AgentThingProperties. the physical name of the thing the agent is installed on # noqa: E501 :param physical_name: The physical_name of this AgentThingProperties. # noqa: E501 :type: str
ctm_api_client/models/agent_thing_properties.py
physical_name
tadinve/ctm_python_client
0
python
@physical_name.setter def physical_name(self, physical_name): 'Sets the physical_name of this AgentThingProperties.\n\n the physical name of the thing the agent is installed on # noqa: E501\n\n :param physical_name: The physical_name of this AgentThingProperties. # noqa: E501\n :type: str\n ' self._physical_name = physical_name
@physical_name.setter def physical_name(self, physical_name): 'Sets the physical_name of this AgentThingProperties.\n\n the physical name of the thing the agent is installed on # noqa: E501\n\n :param physical_name: The physical_name of this AgentThingProperties. # noqa: E501\n :type: str\n ' self._physical_name = physical_name<|docstring|>Sets the physical_name of this AgentThingProperties. the physical name of the thing the agent is installed on # noqa: E501 :param physical_name: The physical_name of this AgentThingProperties. # noqa: E501 :type: str<|endoftext|>
dbd932be984f4d3118b82d48c21afe50faaa168af7652f806602e32e8c772336
@property def tag(self): 'Gets the tag of this AgentThingProperties. # noqa: E501\n\n the agent tag to be associated with the new agent (CMS RBA permissions tag that is) # noqa: E501\n\n :return: The tag of this AgentThingProperties. # noqa: E501\n :rtype: str\n ' return self._tag
Gets the tag of this AgentThingProperties. # noqa: E501 the agent tag to be associated with the new agent (CMS RBA permissions tag that is) # noqa: E501 :return: The tag of this AgentThingProperties. # noqa: E501 :rtype: str
ctm_api_client/models/agent_thing_properties.py
tag
tadinve/ctm_python_client
0
python
@property def tag(self): 'Gets the tag of this AgentThingProperties. # noqa: E501\n\n the agent tag to be associated with the new agent (CMS RBA permissions tag that is) # noqa: E501\n\n :return: The tag of this AgentThingProperties. # noqa: E501\n :rtype: str\n ' return self._tag
@property def tag(self): 'Gets the tag of this AgentThingProperties. # noqa: E501\n\n the agent tag to be associated with the new agent (CMS RBA permissions tag that is) # noqa: E501\n\n :return: The tag of this AgentThingProperties. # noqa: E501\n :rtype: str\n ' return self._tag<|docstring|>Gets the tag of this AgentThingProperties. # noqa: E501 the agent tag to be associated with the new agent (CMS RBA permissions tag that is) # noqa: E501 :return: The tag of this AgentThingProperties. # noqa: E501 :rtype: str<|endoftext|>
e0345b0f9cdd5c85c4971511a135fd31b1f101af53a8871dabf1e920b0b75e7e
@tag.setter def tag(self, tag): 'Sets the tag of this AgentThingProperties.\n\n the agent tag to be associated with the new agent (CMS RBA permissions tag that is) # noqa: E501\n\n :param tag: The tag of this AgentThingProperties. # noqa: E501\n :type: str\n ' self._tag = tag
Sets the tag of this AgentThingProperties. the agent tag to be associated with the new agent (CMS RBA permissions tag that is) # noqa: E501 :param tag: The tag of this AgentThingProperties. # noqa: E501 :type: str
ctm_api_client/models/agent_thing_properties.py
tag
tadinve/ctm_python_client
0
python
@tag.setter def tag(self, tag): 'Sets the tag of this AgentThingProperties.\n\n the agent tag to be associated with the new agent (CMS RBA permissions tag that is) # noqa: E501\n\n :param tag: The tag of this AgentThingProperties. # noqa: E501\n :type: str\n ' self._tag = tag
@tag.setter def tag(self, tag): 'Sets the tag of this AgentThingProperties.\n\n the agent tag to be associated with the new agent (CMS RBA permissions tag that is) # noqa: E501\n\n :param tag: The tag of this AgentThingProperties. # noqa: E501\n :type: str\n ' self._tag = tag<|docstring|>Sets the tag of this AgentThingProperties. the agent tag to be associated with the new agent (CMS RBA permissions tag that is) # noqa: E501 :param tag: The tag of this AgentThingProperties. # noqa: E501 :type: str<|endoftext|>
7ef2a1bb433525ea7b34d7a8df59d542de09a94040ab54d996aa1ff5a002a8f3
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(AgentThingProperties, dict): for (key, value) in self.items(): result[key] = value return result
Returns the model properties as a dict
ctm_api_client/models/agent_thing_properties.py
to_dict
tadinve/ctm_python_client
0
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(AgentThingProperties, dict): for (key, value) in self.items(): result[key] = value return result
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value if issubclass(AgentThingProperties, dict): for (key, value) in self.items(): result[key] = value return result<|docstring|>Returns the model properties as a dict<|endoftext|>
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
Returns the string representation of the model
ctm_api_client/models/agent_thing_properties.py
to_str
tadinve/ctm_python_client
0
python
def to_str(self): return pprint.pformat(self.to_dict())
def to_str(self): return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
For `print` and `pprint`
ctm_api_client/models/agent_thing_properties.py
__repr__
tadinve/ctm_python_client
0
python
def __repr__(self): return self.to_str()
def __repr__(self): return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
6b64912bd6638ae335c6a7458f60bc6df34ddb734354f3a3e252ae35723752fb
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, AgentThingProperties)): return False return (self.to_dict() == other.to_dict())
Returns true if both objects are equal
ctm_api_client/models/agent_thing_properties.py
__eq__
tadinve/ctm_python_client
0
python
def __eq__(self, other): if (not isinstance(other, AgentThingProperties)): return False return (self.to_dict() == other.to_dict())
def __eq__(self, other): if (not isinstance(other, AgentThingProperties)): return False return (self.to_dict() == other.to_dict())<|docstring|>Returns true if both objects are equal<|endoftext|>
e63e3dc1e13b926ad30b50c71c879f4b3466e8e88b5491514de5b5be5a2c73ab
def __ne__(self, other): 'Returns true if both objects are not equal' if (not isinstance(other, AgentThingProperties)): return True return (self.to_dict() != other.to_dict())
Returns true if both objects are not equal
ctm_api_client/models/agent_thing_properties.py
__ne__
tadinve/ctm_python_client
0
python
def __ne__(self, other): if (not isinstance(other, AgentThingProperties)): return True return (self.to_dict() != other.to_dict())
def __ne__(self, other): if (not isinstance(other, AgentThingProperties)): return True return (self.to_dict() != other.to_dict())<|docstring|>Returns true if both objects are not equal<|endoftext|>
7ea6216aa5d5e5d2d376e1323957a1bc945adc872a2934c3364e884e93a5f383
@app.route('/') def main(): 'Main function that does main things.' print('salut') return 'wesh tranquille?'
Main function that does main things.
Flask/Test/main.py
main
MarcPartensky/Python-2020
1
python
@app.route('/') def main(): print('salut') return 'wesh tranquille?'
@app.route('/') def main(): print('salut') return 'wesh tranquille?'<|docstring|>Main function that does main things.<|endoftext|>
6cbcfb2749389ac8de8d54cd5181729d72f25be75aaf33b6c774d51a5c7ead2d
def _ctx_name_callback(): "\n We don't use threading.current_thread() because it will deadlock if\n called when profiling threading._active_limbo_lock.acquire().\n See: #Issue48.\n " try: current_thread = threading._active[get_ident()] return current_thread.__class__.__name__ except KeyError: return None
We don't use threading.current_thread() because it will deadlock if called when profiling threading._active_limbo_lock.acquire(). See: #Issue48.
yappi/yappi.py
_ctx_name_callback
gaborbernat/yappi
877
python
def _ctx_name_callback(): "\n We don't use threading.current_thread() because it will deadlock if\n called when profiling threading._active_limbo_lock.acquire().\n See: #Issue48.\n " try: current_thread = threading._active[get_ident()] return current_thread.__class__.__name__ except KeyError: return None
def _ctx_name_callback(): "\n We don't use threading.current_thread() because it will deadlock if\n called when profiling threading._active_limbo_lock.acquire().\n See: #Issue48.\n " try: current_thread = threading._active[get_ident()] return current_thread.__class__.__name__ except KeyError: return None<|docstring|>We don't use threading.current_thread() because it will deadlock if called when profiling threading._active_limbo_lock.acquire(). See: #Issue48.<|endoftext|>
2b798517999bf81a17646d9ca1103d81eab40daa1e749175037b31d85f6f776e
def _profile_thread_callback(frame, event, arg): "\n _profile_thread_callback will only be called once per-thread. _yappi will detect\n the new thread and changes the profilefunc param of the ThreadState\n structure. This is an internal function please don't mess with it.\n " _yappi._profile_event(frame, event, arg)
_profile_thread_callback will only be called once per-thread. _yappi will detect the new thread and changes the profilefunc param of the ThreadState structure. This is an internal function please don't mess with it.
yappi/yappi.py
_profile_thread_callback
gaborbernat/yappi
877
python
def _profile_thread_callback(frame, event, arg): "\n _profile_thread_callback will only be called once per-thread. _yappi will detect\n the new thread and changes the profilefunc param of the ThreadState\n structure. This is an internal function please don't mess with it.\n " _yappi._profile_event(frame, event, arg)
def _profile_thread_callback(frame, event, arg): "\n _profile_thread_callback will only be called once per-thread. _yappi will detect\n the new thread and changes the profilefunc param of the ThreadState\n structure. This is an internal function please don't mess with it.\n " _yappi._profile_event(frame, event, arg)<|docstring|>_profile_thread_callback will only be called once per-thread. _yappi will detect the new thread and changes the profilefunc param of the ThreadState structure. This is an internal function please don't mess with it.<|endoftext|>
92887686e355da4bb99eaf52c47428c2c0803def11cf4aa94fa50db014a0530f
def _create_greenlet_callbacks(): "\n Returns two functions:\n - one that can identify unique greenlets. Identity of a greenlet\n cannot be reused once a greenlet dies. 'id(greenlet)' cannot be used because\n 'id' returns an identifier that can be reused once a greenlet object is garbage\n collected.\n - one that can return the name of the greenlet class used to spawn the greenlet\n " try: from greenlet import getcurrent except ImportError as exc: raise YappiError(("'greenlet' import failed with: %s" % repr(exc))) def _get_greenlet_id(): curr_greenlet = getcurrent() id_ = getattr(curr_greenlet, '_yappi_tid', None) if (id_ is None): id_ = GREENLET_COUNTER() curr_greenlet._yappi_tid = id_ return id_ def _get_greenlet_name(): return getcurrent().__class__.__name__ return (_get_greenlet_id, _get_greenlet_name)
Returns two functions: - one that can identify unique greenlets. Identity of a greenlet cannot be reused once a greenlet dies. 'id(greenlet)' cannot be used because 'id' returns an identifier that can be reused once a greenlet object is garbage collected. - one that can return the name of the greenlet class used to spawn the greenlet
yappi/yappi.py
_create_greenlet_callbacks
gaborbernat/yappi
877
python
def _create_greenlet_callbacks(): "\n Returns two functions:\n - one that can identify unique greenlets. Identity of a greenlet\n cannot be reused once a greenlet dies. 'id(greenlet)' cannot be used because\n 'id' returns an identifier that can be reused once a greenlet object is garbage\n collected.\n - one that can return the name of the greenlet class used to spawn the greenlet\n " try: from greenlet import getcurrent except ImportError as exc: raise YappiError(("'greenlet' import failed with: %s" % repr(exc))) def _get_greenlet_id(): curr_greenlet = getcurrent() id_ = getattr(curr_greenlet, '_yappi_tid', None) if (id_ is None): id_ = GREENLET_COUNTER() curr_greenlet._yappi_tid = id_ return id_ def _get_greenlet_name(): return getcurrent().__class__.__name__ return (_get_greenlet_id, _get_greenlet_name)
def _create_greenlet_callbacks(): "\n Returns two functions:\n - one that can identify unique greenlets. Identity of a greenlet\n cannot be reused once a greenlet dies. 'id(greenlet)' cannot be used because\n 'id' returns an identifier that can be reused once a greenlet object is garbage\n collected.\n - one that can return the name of the greenlet class used to spawn the greenlet\n " try: from greenlet import getcurrent except ImportError as exc: raise YappiError(("'greenlet' import failed with: %s" % repr(exc))) def _get_greenlet_id(): curr_greenlet = getcurrent() id_ = getattr(curr_greenlet, '_yappi_tid', None) if (id_ is None): id_ = GREENLET_COUNTER() curr_greenlet._yappi_tid = id_ return id_ def _get_greenlet_name(): return getcurrent().__class__.__name__ return (_get_greenlet_id, _get_greenlet_name)<|docstring|>Returns two functions: - one that can identify unique greenlets. Identity of a greenlet cannot be reused once a greenlet dies. 'id(greenlet)' cannot be used because 'id' returns an identifier that can be reused once a greenlet object is garbage collected. - one that can return the name of the greenlet class used to spawn the greenlet<|endoftext|>
f199ef7cf9053fb06d32514139a5d9d430e39ba81eff09a7faa809b510ac296c
def _fft(x, COL_SIZE=8): '\n function to prettify time columns in stats.\n ' _rprecision = 6 while (_rprecision > 0): _fmt = (('%0.' + ('%d' % _rprecision)) + 'f') s = (_fmt % x) if (len(s) <= COL_SIZE): break _rprecision -= 1 return s
function to prettify time columns in stats.
yappi/yappi.py
_fft
gaborbernat/yappi
877
python
def _fft(x, COL_SIZE=8): '\n \n ' _rprecision = 6 while (_rprecision > 0): _fmt = (('%0.' + ('%d' % _rprecision)) + 'f') s = (_fmt % x) if (len(s) <= COL_SIZE): break _rprecision -= 1 return s
def _fft(x, COL_SIZE=8): '\n \n ' _rprecision = 6 while (_rprecision > 0): _fmt = (('%0.' + ('%d' % _rprecision)) + 'f') s = (_fmt % x) if (len(s) <= COL_SIZE): break _rprecision -= 1 return s<|docstring|>function to prettify time columns in stats.<|endoftext|>
3914d2c012eeaa553b3a4923326dab529aa6be815bf92a184c330e81eb8edbe9
def func_matches(stat, funcs): "\n This function will not work with stats that are saved and loaded. That is \n because current API of loading stats is as following:\n yappi.get_func_stats(filter_callback=_filter).add('dummy.ys').print_all()\n\n funcs: is an iterable that selects functions via method descriptor/bound method\n or function object. selector type depends on the function object: If function\n is a builtin method, you can use method_descriptor. If it is a builtin function\n you can select it like e.g: `time.sleep`. For other cases you could use anything \n that has a code object.\n " if (not isinstance(stat, YStat)): raise YappiError(("Argument 'stat' shall be a YStat object. (%s)" % stat)) if (not isinstance(funcs, list)): raise YappiError(("Argument 'funcs' is not a list object. (%s)" % funcs)) if (not len(funcs)): raise YappiError("Argument 'funcs' cannot be empty.") if (stat.full_name not in _fn_descriptor_dict): return False funcs = set(funcs) for func in funcs.copy(): if (not callable(func)): raise YappiError(("Non-callable item in 'funcs'. (%s)" % func)) if getattr(func, '__code__', None): funcs.add(func.__code__) try: return (_fn_descriptor_dict[stat.full_name] in funcs) except TypeError: return False
This function will not work with stats that are saved and loaded. That is because current API of loading stats is as following: yappi.get_func_stats(filter_callback=_filter).add('dummy.ys').print_all() funcs: is an iterable that selects functions via method descriptor/bound method or function object. selector type depends on the function object: If function is a builtin method, you can use method_descriptor. If it is a builtin function you can select it like e.g: `time.sleep`. For other cases you could use anything that has a code object.
yappi/yappi.py
func_matches
gaborbernat/yappi
877
python
def func_matches(stat, funcs): "\n This function will not work with stats that are saved and loaded. That is \n because current API of loading stats is as following:\n yappi.get_func_stats(filter_callback=_filter).add('dummy.ys').print_all()\n\n funcs: is an iterable that selects functions via method descriptor/bound method\n or function object. selector type depends on the function object: If function\n is a builtin method, you can use method_descriptor. If it is a builtin function\n you can select it like e.g: `time.sleep`. For other cases you could use anything \n that has a code object.\n " if (not isinstance(stat, YStat)): raise YappiError(("Argument 'stat' shall be a YStat object. (%s)" % stat)) if (not isinstance(funcs, list)): raise YappiError(("Argument 'funcs' is not a list object. (%s)" % funcs)) if (not len(funcs)): raise YappiError("Argument 'funcs' cannot be empty.") if (stat.full_name not in _fn_descriptor_dict): return False funcs = set(funcs) for func in funcs.copy(): if (not callable(func)): raise YappiError(("Non-callable item in 'funcs'. (%s)" % func)) if getattr(func, '__code__', None): funcs.add(func.__code__) try: return (_fn_descriptor_dict[stat.full_name] in funcs) except TypeError: return False
def func_matches(stat, funcs): "\n This function will not work with stats that are saved and loaded. That is \n because current API of loading stats is as following:\n yappi.get_func_stats(filter_callback=_filter).add('dummy.ys').print_all()\n\n funcs: is an iterable that selects functions via method descriptor/bound method\n or function object. selector type depends on the function object: If function\n is a builtin method, you can use method_descriptor. If it is a builtin function\n you can select it like e.g: `time.sleep`. For other cases you could use anything \n that has a code object.\n " if (not isinstance(stat, YStat)): raise YappiError(("Argument 'stat' shall be a YStat object. (%s)" % stat)) if (not isinstance(funcs, list)): raise YappiError(("Argument 'funcs' is not a list object. (%s)" % funcs)) if (not len(funcs)): raise YappiError("Argument 'funcs' cannot be empty.") if (stat.full_name not in _fn_descriptor_dict): return False funcs = set(funcs) for func in funcs.copy(): if (not callable(func)): raise YappiError(("Non-callable item in 'funcs'. (%s)" % func)) if getattr(func, '__code__', None): funcs.add(func.__code__) try: return (_fn_descriptor_dict[stat.full_name] in funcs) except TypeError: return False<|docstring|>This function will not work with stats that are saved and loaded. That is because current API of loading stats is as following: yappi.get_func_stats(filter_callback=_filter).add('dummy.ys').print_all() funcs: is an iterable that selects functions via method descriptor/bound method or function object. selector type depends on the function object: If function is a builtin method, you can use method_descriptor. If it is a builtin function you can select it like e.g: `time.sleep`. For other cases you could use anything that has a code object.<|endoftext|>
65fd95eaec84a0b960aea6feda9c247e4f4bcc16cf8f033c76d967db47cfdd68
def profile(clock_type='cpu', profile_builtins=False, return_callback=None): '\n A profile decorator that can be used to profile a single call.\n\n We need to clear_stats() on entry/exit of the function unfortunately.\n As yappi is a per-interpreter resource, we cannot simply resume profiling\n session upon exit of the function, that is because we _may_ simply change\n start() params which may differ from the paused session that may cause unstable\n results. So, if you use a decorator, then global profiling may return bogus\n results or no results at all.\n ' def _profile_dec(func): def wrapper(*args, **kwargs): if (func._rec_level == 0): clear_stats() set_clock_type(clock_type) start(profile_builtins, profile_threads=False) func._rec_level += 1 try: return func(*args, **kwargs) finally: func._rec_level -= 1 if (func._rec_level == 0): try: stop() if (return_callback is None): sys.stdout.write(LINESEP) sys.stdout.write(('Executed in %s %s clock seconds' % (_fft(get_thread_stats()[0].ttot), clock_type.upper()))) sys.stdout.write(LINESEP) get_func_stats().print_all() else: return_callback(func, get_func_stats()) finally: clear_stats() func._rec_level = 0 return wrapper return _profile_dec
A profile decorator that can be used to profile a single call. We need to clear_stats() on entry/exit of the function unfortunately. As yappi is a per-interpreter resource, we cannot simply resume profiling session upon exit of the function, that is because we _may_ simply change start() params which may differ from the paused session that may cause unstable results. So, if you use a decorator, then global profiling may return bogus results or no results at all.
yappi/yappi.py
profile
gaborbernat/yappi
877
python
def profile(clock_type='cpu', profile_builtins=False, return_callback=None): '\n A profile decorator that can be used to profile a single call.\n\n We need to clear_stats() on entry/exit of the function unfortunately.\n As yappi is a per-interpreter resource, we cannot simply resume profiling\n session upon exit of the function, that is because we _may_ simply change\n start() params which may differ from the paused session that may cause unstable\n results. So, if you use a decorator, then global profiling may return bogus\n results or no results at all.\n ' def _profile_dec(func): def wrapper(*args, **kwargs): if (func._rec_level == 0): clear_stats() set_clock_type(clock_type) start(profile_builtins, profile_threads=False) func._rec_level += 1 try: return func(*args, **kwargs) finally: func._rec_level -= 1 if (func._rec_level == 0): try: stop() if (return_callback is None): sys.stdout.write(LINESEP) sys.stdout.write(('Executed in %s %s clock seconds' % (_fft(get_thread_stats()[0].ttot), clock_type.upper()))) sys.stdout.write(LINESEP) get_func_stats().print_all() else: return_callback(func, get_func_stats()) finally: clear_stats() func._rec_level = 0 return wrapper return _profile_dec
def profile(clock_type='cpu', profile_builtins=False, return_callback=None): '\n A profile decorator that can be used to profile a single call.\n\n We need to clear_stats() on entry/exit of the function unfortunately.\n As yappi is a per-interpreter resource, we cannot simply resume profiling\n session upon exit of the function, that is because we _may_ simply change\n start() params which may differ from the paused session that may cause unstable\n results. So, if you use a decorator, then global profiling may return bogus\n results or no results at all.\n ' def _profile_dec(func): def wrapper(*args, **kwargs): if (func._rec_level == 0): clear_stats() set_clock_type(clock_type) start(profile_builtins, profile_threads=False) func._rec_level += 1 try: return func(*args, **kwargs) finally: func._rec_level -= 1 if (func._rec_level == 0): try: stop() if (return_callback is None): sys.stdout.write(LINESEP) sys.stdout.write(('Executed in %s %s clock seconds' % (_fft(get_thread_stats()[0].ttot), clock_type.upper()))) sys.stdout.write(LINESEP) get_func_stats().print_all() else: return_callback(func, get_func_stats()) finally: clear_stats() func._rec_level = 0 return wrapper return _profile_dec<|docstring|>A profile decorator that can be used to profile a single call. We need to clear_stats() on entry/exit of the function unfortunately. As yappi is a per-interpreter resource, we cannot simply resume profiling session upon exit of the function, that is because we _may_ simply change start() params which may differ from the paused session that may cause unstable results. So, if you use a decorator, then global profiling may return bogus results or no results at all.<|endoftext|>
d4a841042e050334dd17b6c08792a4372ab7fcd5f3b1d8e506b1cdec7b897c0e
def is_running(): '\n Returns true if the profiler is running, false otherwise.\n ' return bool(_yappi.is_running())
Returns true if the profiler is running, false otherwise.
yappi/yappi.py
is_running
gaborbernat/yappi
877
python
def is_running(): '\n \n ' return bool(_yappi.is_running())
def is_running(): '\n \n ' return bool(_yappi.is_running())<|docstring|>Returns true if the profiler is running, false otherwise.<|endoftext|>
c579ee1821c3ac4f8a1ea0f31b5dcbc1517b46a21d93e6c6235a4789db7d6812
def start(builtins=False, profile_threads=True, profile_greenlets=True): "\n Start profiler.\n\n profile_threads: Set to True to profile multiple threads. Set to False\n to profile only the invoking thread. This argument is only respected when\n context backend is 'native_thread' and ignored otherwise.\n\n profile_greenlets: Set to True to profile multiple greenlets. Set to\n False to profile only the invoking greenlet. This argument is only respected\n when context backend is 'greenlet' and ignored otherwise.\n " backend = _yappi.get_context_backend() profile_contexts = ((profile_threads and (backend == NATIVE_THREAD)) or (profile_greenlets and (backend == GREENLET))) if profile_contexts: threading.setprofile(_profile_thread_callback) _yappi.start(builtins, profile_contexts)
Start profiler. profile_threads: Set to True to profile multiple threads. Set to False to profile only the invoking thread. This argument is only respected when context backend is 'native_thread' and ignored otherwise. profile_greenlets: Set to True to profile multiple greenlets. Set to False to profile only the invoking greenlet. This argument is only respected when context backend is 'greenlet' and ignored otherwise.
yappi/yappi.py
start
gaborbernat/yappi
877
python
def start(builtins=False, profile_threads=True, profile_greenlets=True): "\n Start profiler.\n\n profile_threads: Set to True to profile multiple threads. Set to False\n to profile only the invoking thread. This argument is only respected when\n context backend is 'native_thread' and ignored otherwise.\n\n profile_greenlets: Set to True to profile multiple greenlets. Set to\n False to profile only the invoking greenlet. This argument is only respected\n when context backend is 'greenlet' and ignored otherwise.\n " backend = _yappi.get_context_backend() profile_contexts = ((profile_threads and (backend == NATIVE_THREAD)) or (profile_greenlets and (backend == GREENLET))) if profile_contexts: threading.setprofile(_profile_thread_callback) _yappi.start(builtins, profile_contexts)
def start(builtins=False, profile_threads=True, profile_greenlets=True): "\n Start profiler.\n\n profile_threads: Set to True to profile multiple threads. Set to False\n to profile only the invoking thread. This argument is only respected when\n context backend is 'native_thread' and ignored otherwise.\n\n profile_greenlets: Set to True to profile multiple greenlets. Set to\n False to profile only the invoking greenlet. This argument is only respected\n when context backend is 'greenlet' and ignored otherwise.\n " backend = _yappi.get_context_backend() profile_contexts = ((profile_threads and (backend == NATIVE_THREAD)) or (profile_greenlets and (backend == GREENLET))) if profile_contexts: threading.setprofile(_profile_thread_callback) _yappi.start(builtins, profile_contexts)<|docstring|>Start profiler. profile_threads: Set to True to profile multiple threads. Set to False to profile only the invoking thread. This argument is only respected when context backend is 'native_thread' and ignored otherwise. profile_greenlets: Set to True to profile multiple greenlets. Set to False to profile only the invoking greenlet. This argument is only respected when context backend is 'greenlet' and ignored otherwise.<|endoftext|>
a40c55c606f04b8dd160624e9781e842ae9784470f9a9e24e1b6034ccdc98e4f
def get_func_stats(tag=None, ctx_id=None, filter=None, filter_callback=None): '\n Gets the function profiler results with given filters and returns an iterable.\n\n filter: is here mainly for backward compat. we will not document it anymore.\n tag, ctx_id: select given tag and ctx_id related stats in C side.\n filter_callback: we could do it like: get_func_stats().filter(). The problem\n with this approach is YFuncStats has an internal list which complicates:\n - delete() operation because list deletions are O(n)\n - sort() and pop() operations currently work on sorted list and they hold the\n list as sorted.\n To preserve above behaviour and have a delete() method, we can use an OrderedDict()\n maybe, but simply that is not worth the effort for an extra filter() call. Maybe\n in the future.\n ' if (not filter): filter = {} if tag: filter['tag'] = tag if ctx_id: filter['ctx_id'] = ctx_id _yappi._pause() try: stats = YFuncStats().get(filter=filter, filter_callback=filter_callback) finally: _yappi._resume() return stats
Gets the function profiler results with given filters and returns an iterable. filter: is here mainly for backward compat. we will not document it anymore. tag, ctx_id: select given tag and ctx_id related stats in C side. filter_callback: we could do it like: get_func_stats().filter(). The problem with this approach is YFuncStats has an internal list which complicates: - delete() operation because list deletions are O(n) - sort() and pop() operations currently work on sorted list and they hold the list as sorted. To preserve above behaviour and have a delete() method, we can use an OrderedDict() maybe, but simply that is not worth the effort for an extra filter() call. Maybe in the future.
yappi/yappi.py
get_func_stats
gaborbernat/yappi
877
python
def get_func_stats(tag=None, ctx_id=None, filter=None, filter_callback=None): '\n Gets the function profiler results with given filters and returns an iterable.\n\n filter: is here mainly for backward compat. we will not document it anymore.\n tag, ctx_id: select given tag and ctx_id related stats in C side.\n filter_callback: we could do it like: get_func_stats().filter(). The problem\n with this approach is YFuncStats has an internal list which complicates:\n - delete() operation because list deletions are O(n)\n - sort() and pop() operations currently work on sorted list and they hold the\n list as sorted.\n To preserve above behaviour and have a delete() method, we can use an OrderedDict()\n maybe, but simply that is not worth the effort for an extra filter() call. Maybe\n in the future.\n ' if (not filter): filter = {} if tag: filter['tag'] = tag if ctx_id: filter['ctx_id'] = ctx_id _yappi._pause() try: stats = YFuncStats().get(filter=filter, filter_callback=filter_callback) finally: _yappi._resume() return stats
def get_func_stats(tag=None, ctx_id=None, filter=None, filter_callback=None): '\n Gets the function profiler results with given filters and returns an iterable.\n\n filter: is here mainly for backward compat. we will not document it anymore.\n tag, ctx_id: select given tag and ctx_id related stats in C side.\n filter_callback: we could do it like: get_func_stats().filter(). The problem\n with this approach is YFuncStats has an internal list which complicates:\n - delete() operation because list deletions are O(n)\n - sort() and pop() operations currently work on sorted list and they hold the\n list as sorted.\n To preserve above behaviour and have a delete() method, we can use an OrderedDict()\n maybe, but simply that is not worth the effort for an extra filter() call. Maybe\n in the future.\n ' if (not filter): filter = {} if tag: filter['tag'] = tag if ctx_id: filter['ctx_id'] = ctx_id _yappi._pause() try: stats = YFuncStats().get(filter=filter, filter_callback=filter_callback) finally: _yappi._resume() return stats<|docstring|>Gets the function profiler results with given filters and returns an iterable. filter: is here mainly for backward compat. we will not document it anymore. tag, ctx_id: select given tag and ctx_id related stats in C side. filter_callback: we could do it like: get_func_stats().filter(). The problem with this approach is YFuncStats has an internal list which complicates: - delete() operation because list deletions are O(n) - sort() and pop() operations currently work on sorted list and they hold the list as sorted. To preserve above behaviour and have a delete() method, we can use an OrderedDict() maybe, but simply that is not worth the effort for an extra filter() call. Maybe in the future.<|endoftext|>
6e6ace310154db2b79b9fd77d87f3def992b419299590e157a45ba137279a0ca
def get_thread_stats(): '\n Gets the thread profiler results with given filters and returns an iterable.\n ' return YThreadStats().get()
Gets the thread profiler results with given filters and returns an iterable.
yappi/yappi.py
get_thread_stats
gaborbernat/yappi
877
python
def get_thread_stats(): '\n \n ' return YThreadStats().get()
def get_thread_stats(): '\n \n ' return YThreadStats().get()<|docstring|>Gets the thread profiler results with given filters and returns an iterable.<|endoftext|>
a70dee86115699c9db85e9d0bca843bccdbbe29c17d631575f0d323d86a47007
def get_greenlet_stats(): '\n Gets the greenlet stats captured by the profiler\n ' return YGreenletStats().get()
Gets the greenlet stats captured by the profiler
yappi/yappi.py
get_greenlet_stats
gaborbernat/yappi
877
python
def get_greenlet_stats(): '\n \n ' return YGreenletStats().get()
def get_greenlet_stats(): '\n \n ' return YGreenletStats().get()<|docstring|>Gets the greenlet stats captured by the profiler<|endoftext|>
201253b27daf84bdda97bf905731ebf12fd51d9bcd41b3f3bfc7ddb463c4be3b
def stop(): '\n Stop profiler.\n ' _yappi.stop() threading.setprofile(None)
Stop profiler.
yappi/yappi.py
stop
gaborbernat/yappi
877
python
def stop(): '\n \n ' _yappi.stop() threading.setprofile(None)
def stop(): '\n \n ' _yappi.stop() threading.setprofile(None)<|docstring|>Stop profiler.<|endoftext|>
6de6cc0be4de5e345dd6e9bf6619a8065b6362c5f64da757ba6cad1e594b3cb9
@contextmanager def run(builtins=False, profile_threads=True, profile_greenlets=True): '\n Context manager for profiling a block of code.\n\n Starts profiling before entering the context, and stops profiling when\n exiting from the context.\n\n Usage:\n\n with yappi.run():\n print("this call is profiled")\n\n Warning: don\'t use this recursively, the inner context will stop profiling\n when exited:\n\n with yappi.run():\n with yappi.run():\n print("this call will be profiled")\n print("this call will *not* be profiled")\n ' start(builtins=builtins, profile_threads=profile_threads, profile_greenlets=profile_greenlets) try: (yield) finally: stop()
Context manager for profiling a block of code. Starts profiling before entering the context, and stops profiling when exiting from the context. Usage: with yappi.run(): print("this call is profiled") Warning: don't use this recursively, the inner context will stop profiling when exited: with yappi.run(): with yappi.run(): print("this call will be profiled") print("this call will *not* be profiled")
yappi/yappi.py
run
gaborbernat/yappi
877
python
@contextmanager def run(builtins=False, profile_threads=True, profile_greenlets=True): '\n Context manager for profiling a block of code.\n\n Starts profiling before entering the context, and stops profiling when\n exiting from the context.\n\n Usage:\n\n with yappi.run():\n print("this call is profiled")\n\n Warning: don\'t use this recursively, the inner context will stop profiling\n when exited:\n\n with yappi.run():\n with yappi.run():\n print("this call will be profiled")\n print("this call will *not* be profiled")\n ' start(builtins=builtins, profile_threads=profile_threads, profile_greenlets=profile_greenlets) try: (yield) finally: stop()
@contextmanager def run(builtins=False, profile_threads=True, profile_greenlets=True): '\n Context manager for profiling a block of code.\n\n Starts profiling before entering the context, and stops profiling when\n exiting from the context.\n\n Usage:\n\n with yappi.run():\n print("this call is profiled")\n\n Warning: don\'t use this recursively, the inner context will stop profiling\n when exited:\n\n with yappi.run():\n with yappi.run():\n print("this call will be profiled")\n print("this call will *not* be profiled")\n ' start(builtins=builtins, profile_threads=profile_threads, profile_greenlets=profile_greenlets) try: (yield) finally: stop()<|docstring|>Context manager for profiling a block of code. Starts profiling before entering the context, and stops profiling when exiting from the context. Usage: with yappi.run(): print("this call is profiled") Warning: don't use this recursively, the inner context will stop profiling when exited: with yappi.run(): with yappi.run(): print("this call will be profiled") print("this call will *not* be profiled")<|endoftext|>
fd9c71b3c58ba6298299d24d105d299edb5b88e5b48ad9519c28725c2252ae9f
def clear_stats(): '\n Clears all of the profile results.\n ' _yappi._pause() try: _yappi.clear_stats() finally: _yappi._resume()
Clears all of the profile results.
yappi/yappi.py
clear_stats
gaborbernat/yappi
877
python
def clear_stats(): '\n \n ' _yappi._pause() try: _yappi.clear_stats() finally: _yappi._resume()
def clear_stats(): '\n \n ' _yappi._pause() try: _yappi.clear_stats() finally: _yappi._resume()<|docstring|>Clears all of the profile results.<|endoftext|>
0261c49a1498a3083ef340b40bf31ce7d2917bf6e37d878d1f37c2d7c70b9e23
def get_clock_time(): '\n Returns the current clock time with regard to current clock type.\n ' return _yappi.get_clock_time()
Returns the current clock time with regard to current clock type.
yappi/yappi.py
get_clock_time
gaborbernat/yappi
877
python
def get_clock_time(): '\n \n ' return _yappi.get_clock_time()
def get_clock_time(): '\n \n ' return _yappi.get_clock_time()<|docstring|>Returns the current clock time with regard to current clock type.<|endoftext|>
07c61b933ad311e695c8548ff4739e953630c944562a3e23982c70670537c8c4
def get_clock_type(): '\n Returns the underlying clock type\n ' return _yappi.get_clock_type()
Returns the underlying clock type
yappi/yappi.py
get_clock_type
gaborbernat/yappi
877
python
def get_clock_type(): '\n \n ' return _yappi.get_clock_type()
def get_clock_type(): '\n \n ' return _yappi.get_clock_type()<|docstring|>Returns the underlying clock type<|endoftext|>
486c47d3a6eebdec333b847fc902332d9bff5f3a8ce5d96b51c5a39a2c0b430d
def get_clock_info(): '\n Returns a dict containing the OS API used for timing, the precision of the\n underlying clock.\n ' return _yappi.get_clock_info()
Returns a dict containing the OS API used for timing, the precision of the underlying clock.
yappi/yappi.py
get_clock_info
gaborbernat/yappi
877
python
def get_clock_info(): '\n Returns a dict containing the OS API used for timing, the precision of the\n underlying clock.\n ' return _yappi.get_clock_info()
def get_clock_info(): '\n Returns a dict containing the OS API used for timing, the precision of the\n underlying clock.\n ' return _yappi.get_clock_info()<|docstring|>Returns a dict containing the OS API used for timing, the precision of the underlying clock.<|endoftext|>
ac55b277ad3fb7747322e3493577d143efbb3abc70d86a0dcf2ab948a3b7f77b
def set_clock_type(type): '\n Sets the internal clock type for timing. Profiler shall not have any previous stats.\n Otherwise an exception is thrown.\n ' type = type.upper() if (type not in CLOCK_TYPES): raise YappiError(('Invalid clock type:%s' % type)) _yappi.set_clock_type(CLOCK_TYPES[type])
Sets the internal clock type for timing. Profiler shall not have any previous stats. Otherwise an exception is thrown.
yappi/yappi.py
set_clock_type
gaborbernat/yappi
877
python
def set_clock_type(type): '\n Sets the internal clock type for timing. Profiler shall not have any previous stats.\n Otherwise an exception is thrown.\n ' type = type.upper() if (type not in CLOCK_TYPES): raise YappiError(('Invalid clock type:%s' % type)) _yappi.set_clock_type(CLOCK_TYPES[type])
def set_clock_type(type): '\n Sets the internal clock type for timing. Profiler shall not have any previous stats.\n Otherwise an exception is thrown.\n ' type = type.upper() if (type not in CLOCK_TYPES): raise YappiError(('Invalid clock type:%s' % type)) _yappi.set_clock_type(CLOCK_TYPES[type])<|docstring|>Sets the internal clock type for timing. Profiler shall not have any previous stats. Otherwise an exception is thrown.<|endoftext|>
c4515ef31fce816dd4a946c97093ba63a08cba8392409c26aa5c5d0cfee70de6
def get_mem_usage(): '\n Returns the internal memory usage of the profiler itself.\n ' return _yappi.get_mem_usage()
Returns the internal memory usage of the profiler itself.
yappi/yappi.py
get_mem_usage
gaborbernat/yappi
877
python
def get_mem_usage(): '\n \n ' return _yappi.get_mem_usage()
def get_mem_usage(): '\n \n ' return _yappi.get_mem_usage()<|docstring|>Returns the internal memory usage of the profiler itself.<|endoftext|>
f1399168ab506cadf8cf141392abca66ff6d49b261fd423193fe77c8184f18f6
def set_tag_callback(cbk): '\n Every stat. entry will have a specific tag field and users might be able\n to filter on stats via tag field.\n ' return _yappi.set_tag_callback(cbk)
Every stat. entry will have a specific tag field and users might be able to filter on stats via tag field.
yappi/yappi.py
set_tag_callback
gaborbernat/yappi
877
python
def set_tag_callback(cbk): '\n Every stat. entry will have a specific tag field and users might be able\n to filter on stats via tag field.\n ' return _yappi.set_tag_callback(cbk)
def set_tag_callback(cbk): '\n Every stat. entry will have a specific tag field and users might be able\n to filter on stats via tag field.\n ' return _yappi.set_tag_callback(cbk)<|docstring|>Every stat. entry will have a specific tag field and users might be able to filter on stats via tag field.<|endoftext|>
98ea756258afe4e5725bb29894a89c698d3d923e130fe45c20efae7edf4de3c3
def set_context_backend(type): '\n Sets the internal context backend used to track execution context.\n\n type must be one of \'greenlet\' or \'native_thread\'. For example:\n\n >>> import greenlet, yappi\n >>> yappi.set_context_backend("greenlet")\n\n Setting the context backend will reset any callbacks configured via:\n - set_context_id_callback\n - set_context_name_callback\n\n The default callbacks for the backend provided will be installed instead.\n Configure the callbacks each time after setting context backend.\n ' type = type.upper() if (type not in BACKEND_TYPES): raise YappiError(('Invalid backend type: %s' % type)) if (type == GREENLET): (id_cbk, name_cbk) = _create_greenlet_callbacks() _yappi.set_context_id_callback(id_cbk) set_context_name_callback(name_cbk) else: _yappi.set_context_id_callback(None) set_context_name_callback(None) _yappi.set_context_backend(BACKEND_TYPES[type])
Sets the internal context backend used to track execution context. type must be one of 'greenlet' or 'native_thread'. For example: >>> import greenlet, yappi >>> yappi.set_context_backend("greenlet") Setting the context backend will reset any callbacks configured via: - set_context_id_callback - set_context_name_callback The default callbacks for the backend provided will be installed instead. Configure the callbacks each time after setting context backend.
yappi/yappi.py
set_context_backend
gaborbernat/yappi
877
python
def set_context_backend(type): '\n Sets the internal context backend used to track execution context.\n\n type must be one of \'greenlet\' or \'native_thread\'. For example:\n\n >>> import greenlet, yappi\n >>> yappi.set_context_backend("greenlet")\n\n Setting the context backend will reset any callbacks configured via:\n - set_context_id_callback\n - set_context_name_callback\n\n The default callbacks for the backend provided will be installed instead.\n Configure the callbacks each time after setting context backend.\n ' type = type.upper() if (type not in BACKEND_TYPES): raise YappiError(('Invalid backend type: %s' % type)) if (type == GREENLET): (id_cbk, name_cbk) = _create_greenlet_callbacks() _yappi.set_context_id_callback(id_cbk) set_context_name_callback(name_cbk) else: _yappi.set_context_id_callback(None) set_context_name_callback(None) _yappi.set_context_backend(BACKEND_TYPES[type])
def set_context_backend(type): '\n Sets the internal context backend used to track execution context.\n\n type must be one of \'greenlet\' or \'native_thread\'. For example:\n\n >>> import greenlet, yappi\n >>> yappi.set_context_backend("greenlet")\n\n Setting the context backend will reset any callbacks configured via:\n - set_context_id_callback\n - set_context_name_callback\n\n The default callbacks for the backend provided will be installed instead.\n Configure the callbacks each time after setting context backend.\n ' type = type.upper() if (type not in BACKEND_TYPES): raise YappiError(('Invalid backend type: %s' % type)) if (type == GREENLET): (id_cbk, name_cbk) = _create_greenlet_callbacks() _yappi.set_context_id_callback(id_cbk) set_context_name_callback(name_cbk) else: _yappi.set_context_id_callback(None) set_context_name_callback(None) _yappi.set_context_backend(BACKEND_TYPES[type])<|docstring|>Sets the internal context backend used to track execution context. type must be one of 'greenlet' or 'native_thread'. For example: >>> import greenlet, yappi >>> yappi.set_context_backend("greenlet") Setting the context backend will reset any callbacks configured via: - set_context_id_callback - set_context_name_callback The default callbacks for the backend provided will be installed instead. Configure the callbacks each time after setting context backend.<|endoftext|>
0bcd6845b6ec9835bdc736746afd67e2b858b2c4d9ef4cc8c3a334ad5ef95977
def set_context_id_callback(callback): '\n Use a number other than thread_id to determine the current context.\n\n The callback must take no arguments and return an integer. For example:\n\n >>> import greenlet, yappi\n >>> yappi.set_context_id_callback(lambda: id(greenlet.getcurrent()))\n ' return _yappi.set_context_id_callback(callback)
Use a number other than thread_id to determine the current context. The callback must take no arguments and return an integer. For example: >>> import greenlet, yappi >>> yappi.set_context_id_callback(lambda: id(greenlet.getcurrent()))
yappi/yappi.py
set_context_id_callback
gaborbernat/yappi
877
python
def set_context_id_callback(callback): '\n Use a number other than thread_id to determine the current context.\n\n The callback must take no arguments and return an integer. For example:\n\n >>> import greenlet, yappi\n >>> yappi.set_context_id_callback(lambda: id(greenlet.getcurrent()))\n ' return _yappi.set_context_id_callback(callback)
def set_context_id_callback(callback): '\n Use a number other than thread_id to determine the current context.\n\n The callback must take no arguments and return an integer. For example:\n\n >>> import greenlet, yappi\n >>> yappi.set_context_id_callback(lambda: id(greenlet.getcurrent()))\n ' return _yappi.set_context_id_callback(callback)<|docstring|>Use a number other than thread_id to determine the current context. The callback must take no arguments and return an integer. For example: >>> import greenlet, yappi >>> yappi.set_context_id_callback(lambda: id(greenlet.getcurrent()))<|endoftext|>
021f07146ba1e92eb295fb364f26555145ced90246ca188499eaf2ddc3bb908b
def set_context_name_callback(callback): "\n Set the callback to retrieve current context's name.\n\n The callback must take no arguments and return a string. For example:\n\n >>> import greenlet, yappi\n >>> yappi.set_context_name_callback(\n ... lambda: greenlet.getcurrent().__class__.__name__)\n\n If the callback cannot return the name at this time but may be able to\n return it later, it should return None.\n " if (callback is None): return _yappi.set_context_name_callback(_ctx_name_callback) return _yappi.set_context_name_callback(callback)
Set the callback to retrieve current context's name. The callback must take no arguments and return a string. For example: >>> import greenlet, yappi >>> yappi.set_context_name_callback( ... lambda: greenlet.getcurrent().__class__.__name__) If the callback cannot return the name at this time but may be able to return it later, it should return None.
yappi/yappi.py
set_context_name_callback
gaborbernat/yappi
877
python
def set_context_name_callback(callback): "\n Set the callback to retrieve current context's name.\n\n The callback must take no arguments and return a string. For example:\n\n >>> import greenlet, yappi\n >>> yappi.set_context_name_callback(\n ... lambda: greenlet.getcurrent().__class__.__name__)\n\n If the callback cannot return the name at this time but may be able to\n return it later, it should return None.\n " if (callback is None): return _yappi.set_context_name_callback(_ctx_name_callback) return _yappi.set_context_name_callback(callback)
def set_context_name_callback(callback): "\n Set the callback to retrieve current context's name.\n\n The callback must take no arguments and return a string. For example:\n\n >>> import greenlet, yappi\n >>> yappi.set_context_name_callback(\n ... lambda: greenlet.getcurrent().__class__.__name__)\n\n If the callback cannot return the name at this time but may be able to\n return it later, it should return None.\n " if (callback is None): return _yappi.set_context_name_callback(_ctx_name_callback) return _yappi.set_context_name_callback(callback)<|docstring|>Set the callback to retrieve current context's name. The callback must take no arguments and return a string. For example: >>> import greenlet, yappi >>> yappi.set_context_name_callback( ... lambda: greenlet.getcurrent().__class__.__name__) If the callback cannot return the name at this time but may be able to return it later, it should return None.<|endoftext|>
413312f1335f4ee9ab3cbddae6b5d6f86e8594a1c3e4ab7c7fe4bb90d67553d9
def _debug_check_sanity(self): '\n Check for basic sanity errors in stats. e.g: Check for duplicate stats.\n ' for x in self: if (self.count(x) > 1): return False return True
Check for basic sanity errors in stats. e.g: Check for duplicate stats.
yappi/yappi.py
_debug_check_sanity
gaborbernat/yappi
877
python
def _debug_check_sanity(self): '\n \n ' for x in self: if (self.count(x) > 1): return False return True
def _debug_check_sanity(self): '\n \n ' for x in self: if (self.count(x) > 1): return False return True<|docstring|>Check for basic sanity errors in stats. e.g: Check for duplicate stats.<|endoftext|>
1ad59e1a51972b7df467da555ad559bbb8b31f939a24a7958931b290abeae188
def print_all(self, out=sys.stdout, columns={0: ('name', 36), 1: ('ncall', 5), 2: ('tsub', 8), 3: ('ttot', 8), 4: ('tavg', 8)}): '\n Prints all of the child function profiler results to a given file. (stdout by default)\n ' if (self.empty() or (len(columns) == 0)): return for (_, col) in columns.items(): _validate_columns(col[0], COLUMNS_FUNCSTATS) out.write(LINESEP) self._print_header(out, columns) for stat in self: stat._print(out, columns)
Prints all of the child function profiler results to a given file. (stdout by default)
yappi/yappi.py
print_all
gaborbernat/yappi
877
python
def print_all(self, out=sys.stdout, columns={0: ('name', 36), 1: ('ncall', 5), 2: ('tsub', 8), 3: ('ttot', 8), 4: ('tavg', 8)}): '\n \n ' if (self.empty() or (len(columns) == 0)): return for (_, col) in columns.items(): _validate_columns(col[0], COLUMNS_FUNCSTATS) out.write(LINESEP) self._print_header(out, columns) for stat in self: stat._print(out, columns)
def print_all(self, out=sys.stdout, columns={0: ('name', 36), 1: ('ncall', 5), 2: ('tsub', 8), 3: ('ttot', 8), 4: ('tavg', 8)}): '\n \n ' if (self.empty() or (len(columns) == 0)): return for (_, col) in columns.items(): _validate_columns(col[0], COLUMNS_FUNCSTATS) out.write(LINESEP) self._print_header(out, columns) for stat in self: stat._print(out, columns)<|docstring|>Prints all of the child function profiler results to a given file. (stdout by default)<|endoftext|>
ba2b7312c4c88ff84dddc6ff5b89e114fda4d906bcd8c32eec9aa8573690afaf
def _save_as_PSTAT(self, path): '\n Save the profiling information as PSTAT.\n ' _stats = convert2pstats(self) _stats.dump_stats(path)
Save the profiling information as PSTAT.
yappi/yappi.py
_save_as_PSTAT
gaborbernat/yappi
877
python
def _save_as_PSTAT(self, path): '\n \n ' _stats = convert2pstats(self) _stats.dump_stats(path)
def _save_as_PSTAT(self, path): '\n \n ' _stats = convert2pstats(self) _stats.dump_stats(path)<|docstring|>Save the profiling information as PSTAT.<|endoftext|>
9346d0c612dc3879f13bc3998283b3b6f521cf9b3b2c8d65b9bfc3a682b468f2
def _save_as_CALLGRIND(self, path): '\n Writes all the function stats in a callgrind-style format to the given\n file. (stdout by default)\n ' header = ('version: 1\ncreator: %s\npid: %d\ncmd: %s\npart: 1\n\nevents: Ticks' % ('yappi', os.getpid(), ' '.join(sys.argv))) lines = [header] file_ids = [''] func_ids = [''] for func_stat in self: file_ids += [('fl=(%d) %s' % (func_stat.index, func_stat.module))] func_ids += [('fn=(%d) %s %s:%s' % (func_stat.index, func_stat.name, func_stat.module, func_stat.lineno))] lines += (file_ids + func_ids) for func_stat in self: func_stats = ['', ('fl=(%d)' % func_stat.index), ('fn=(%d)' % func_stat.index)] func_stats += [('%s %s' % (func_stat.lineno, int((func_stat.tsub * 1000000.0))))] for child in func_stat.children: func_stats += [('cfl=(%d)' % child.index), ('cfn=(%d)' % child.index), ('calls=%d 0' % child.ncall), ('0 %d' % int((child.ttot * 1000000.0)))] lines += func_stats with open(path, 'w') as f: f.write('\n'.join(lines))
Writes all the function stats in a callgrind-style format to the given file. (stdout by default)
yappi/yappi.py
_save_as_CALLGRIND
gaborbernat/yappi
877
python
def _save_as_CALLGRIND(self, path): '\n Writes all the function stats in a callgrind-style format to the given\n file. (stdout by default)\n ' header = ('version: 1\ncreator: %s\npid: %d\ncmd: %s\npart: 1\n\nevents: Ticks' % ('yappi', os.getpid(), ' '.join(sys.argv))) lines = [header] file_ids = [''] func_ids = [''] for func_stat in self: file_ids += [('fl=(%d) %s' % (func_stat.index, func_stat.module))] func_ids += [('fn=(%d) %s %s:%s' % (func_stat.index, func_stat.name, func_stat.module, func_stat.lineno))] lines += (file_ids + func_ids) for func_stat in self: func_stats = ['', ('fl=(%d)' % func_stat.index), ('fn=(%d)' % func_stat.index)] func_stats += [('%s %s' % (func_stat.lineno, int((func_stat.tsub * 1000000.0))))] for child in func_stat.children: func_stats += [('cfl=(%d)' % child.index), ('cfn=(%d)' % child.index), ('calls=%d 0' % child.ncall), ('0 %d' % int((child.ttot * 1000000.0)))] lines += func_stats with open(path, 'w') as f: f.write('\n'.join(lines))
def _save_as_CALLGRIND(self, path): '\n Writes all the function stats in a callgrind-style format to the given\n file. (stdout by default)\n ' header = ('version: 1\ncreator: %s\npid: %d\ncmd: %s\npart: 1\n\nevents: Ticks' % ('yappi', os.getpid(), ' '.join(sys.argv))) lines = [header] file_ids = [''] func_ids = [''] for func_stat in self: file_ids += [('fl=(%d) %s' % (func_stat.index, func_stat.module))] func_ids += [('fn=(%d) %s %s:%s' % (func_stat.index, func_stat.name, func_stat.module, func_stat.lineno))] lines += (file_ids + func_ids) for func_stat in self: func_stats = ['', ('fl=(%d)' % func_stat.index), ('fn=(%d)' % func_stat.index)] func_stats += [('%s %s' % (func_stat.lineno, int((func_stat.tsub * 1000000.0))))] for child in func_stat.children: func_stats += [('cfl=(%d)' % child.index), ('cfn=(%d)' % child.index), ('calls=%d 0' % child.ncall), ('0 %d' % int((child.ttot * 1000000.0)))] lines += func_stats with open(path, 'w') as f: f.write('\n'.join(lines))<|docstring|>Writes all the function stats in a callgrind-style format to the given file. (stdout by default)<|endoftext|>
0ca88b324a1e87b6fe1ce5f32bb5b2c05531f7f1c314ad677d3852b4d539979c
def print_all(self, out=sys.stdout, columns={0: ('name', 36), 1: ('ncall', 5), 2: ('tsub', 8), 3: ('ttot', 8), 4: ('tavg', 8)}): '\n Prints all of the function profiler results to a given file. (stdout by default)\n ' if self.empty(): return for (_, col) in columns.items(): _validate_columns(col[0], COLUMNS_FUNCSTATS) out.write(LINESEP) out.write(('Clock type: %s' % self._clock_type.upper())) out.write(LINESEP) out.write(('Ordered by: %s, %s' % (self._sort_type, self._sort_order))) out.write(LINESEP) out.write(LINESEP) self._print_header(out, columns) for stat in self: stat._print(out, columns)
Prints all of the function profiler results to a given file. (stdout by default)
yappi/yappi.py
print_all
gaborbernat/yappi
877
python
def print_all(self, out=sys.stdout, columns={0: ('name', 36), 1: ('ncall', 5), 2: ('tsub', 8), 3: ('ttot', 8), 4: ('tavg', 8)}): '\n \n ' if self.empty(): return for (_, col) in columns.items(): _validate_columns(col[0], COLUMNS_FUNCSTATS) out.write(LINESEP) out.write(('Clock type: %s' % self._clock_type.upper())) out.write(LINESEP) out.write(('Ordered by: %s, %s' % (self._sort_type, self._sort_order))) out.write(LINESEP) out.write(LINESEP) self._print_header(out, columns) for stat in self: stat._print(out, columns)
def print_all(self, out=sys.stdout, columns={0: ('name', 36), 1: ('ncall', 5), 2: ('tsub', 8), 3: ('ttot', 8), 4: ('tavg', 8)}): '\n \n ' if self.empty(): return for (_, col) in columns.items(): _validate_columns(col[0], COLUMNS_FUNCSTATS) out.write(LINESEP) out.write(('Clock type: %s' % self._clock_type.upper())) out.write(LINESEP) out.write(('Ordered by: %s, %s' % (self._sort_type, self._sort_order))) out.write(LINESEP) out.write(LINESEP) self._print_header(out, columns) for stat in self: stat._print(out, columns)<|docstring|>Prints all of the function profiler results to a given file. (stdout by default)<|endoftext|>
7d8f89e48bb43eca91fb135ca73e50a7944ce00c27b04aa28a9b77ca5f767a7d
def print_all(self, out=sys.stdout, columns=None): '\n Prints all of the thread profiler results to a given file. (stdout by default)\n ' if (columns is None): columns = self._DEFAULT_PRINT_COLUMNS if self.empty(): return for (_, col) in columns.items(): _validate_columns(col[0], self._ALL_COLUMNS) out.write(LINESEP) self._print_header(out, columns) for stat in self: stat._print(out, columns)
Prints all of the thread profiler results to a given file. (stdout by default)
yappi/yappi.py
print_all
gaborbernat/yappi
877
python
def print_all(self, out=sys.stdout, columns=None): '\n \n ' if (columns is None): columns = self._DEFAULT_PRINT_COLUMNS if self.empty(): return for (_, col) in columns.items(): _validate_columns(col[0], self._ALL_COLUMNS) out.write(LINESEP) self._print_header(out, columns) for stat in self: stat._print(out, columns)
def print_all(self, out=sys.stdout, columns=None): '\n \n ' if (columns is None): columns = self._DEFAULT_PRINT_COLUMNS if self.empty(): return for (_, col) in columns.items(): _validate_columns(col[0], self._ALL_COLUMNS) out.write(LINESEP) self._print_header(out, columns) for stat in self: stat._print(out, columns)<|docstring|>Prints all of the thread profiler results to a given file. (stdout by default)<|endoftext|>
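The two print_all records above are methods on yappi's FuncStats and ThreadStats objects; a minimal usage sketch follows (the module-level start/stop and get_*_stats helpers are part of yappi's public API, recalled from memory rather than taken from the records):

import yappi

def work():
    return sum(i * i for i in range(100000))

yappi.start()          # begin profiling
work()
yappi.stop()

yappi.get_func_stats().print_all()    # FuncStats.print_all: per-function table on stdout
yappi.get_thread_stats().print_all()  # ThreadStats.print_all: per-thread table on stdout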
d0b527c7759dfefa8b2accd26e44a9dadfc70be8d9f91b5d3dda47ee43f1435e
def setUp(self): '\n If no key/cert pair found, generate a new\n key/cert pair and store as a file.\n ' call(['openssl', 'req', '-x509', '-nodes', '-days', '1', '-newkey', 'rsa:2048', '-keyout', TEST_KEY_FILE, '-out', TEST_CERT_FILE, '-subj', TEST_CERT_DN]) p1 = Popen(['openssl', 'x509', '-subject_hash', '-noout'], stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True) with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() (hash_name, _unused_error) = p1.communicate(cert_string) self.ca_certpath = os.path.join(TEST_CA_DIR, (hash_name.strip() + '.0')) with open(self.ca_certpath, 'w') as ca_cert: ca_cert.write(cert_string)
If no key/cert pair found, generate a new key/cert pair and store as a file.
test/test_crypto.py
setUp
RoseECooper/ssm
7
python
def setUp(self): '\n If no key/cert pair found, generate a new\n key/cert pair and store as a file.\n ' call(['openssl', 'req', '-x509', '-nodes', '-days', '1', '-newkey', 'rsa:2048', '-keyout', TEST_KEY_FILE, '-out', TEST_CERT_FILE, '-subj', TEST_CERT_DN]) p1 = Popen(['openssl', 'x509', '-subject_hash', '-noout'], stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True) with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() (hash_name, _unused_error) = p1.communicate(cert_string) self.ca_certpath = os.path.join(TEST_CA_DIR, (hash_name.strip() + '.0')) with open(self.ca_certpath, 'w') as ca_cert: ca_cert.write(cert_string)
def setUp(self): '\n If no key/cert pair found, generate a new\n key/cert pair and store as a file.\n ' call(['openssl', 'req', '-x509', '-nodes', '-days', '1', '-newkey', 'rsa:2048', '-keyout', TEST_KEY_FILE, '-out', TEST_CERT_FILE, '-subj', TEST_CERT_DN]) p1 = Popen(['openssl', 'x509', '-subject_hash', '-noout'], stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True) with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() (hash_name, _unused_error) = p1.communicate(cert_string) self.ca_certpath = os.path.join(TEST_CA_DIR, (hash_name.strip() + '.0')) with open(self.ca_certpath, 'w') as ca_cert: ca_cert.write(cert_string)<|docstring|>If no key/cert pair found, generate a new key/cert pair and store as a file.<|endoftext|>
730c937fc75e5f528df91ace30f82ab5fa1ea7a326b12f74d6d02794b771b530
def tearDown(self): 'Remove temporary files.' os.remove(TEST_CERT_FILE) os.remove(TEST_KEY_FILE) os.remove(self.ca_certpath)
Remove temporary files.
test/test_crypto.py
tearDown
RoseECooper/ssm
7
python
def tearDown(self): os.remove(TEST_CERT_FILE) os.remove(TEST_KEY_FILE) os.remove(self.ca_certpath)
def tearDown(self): os.remove(TEST_CERT_FILE) os.remove(TEST_KEY_FILE) os.remove(self.ca_certpath)<|docstring|>Remove temporary files.<|endoftext|>
10a53171a0dd735999b37c302f2dc1ccba97698eef80b1e14fc3de2364593c63
def test_check_cert_key(self): 'Check that valid cert and key works.' self.assertTrue(check_cert_key(TEST_CERT_FILE, TEST_KEY_FILE), 'Cert and key match but function failed.')
Check that valid cert and key works.
test/test_crypto.py
test_check_cert_key
RoseECooper/ssm
7
python
def test_check_cert_key(self): self.assertTrue(check_cert_key(TEST_CERT_FILE, TEST_KEY_FILE), 'Cert and key match but function failed.')
def test_check_cert_key(self): self.assertTrue(check_cert_key(TEST_CERT_FILE, TEST_KEY_FILE), 'Cert and key match but function failed.')<|docstring|>Check that valid cert and key works.<|endoftext|>
d66befabfec8066f41d5e01a4f9b6fdadc8909e1cfcf660512824ad4f8a443cf
def test_check_cert_key_invalid_paths(self): "Check invalid file paths don't return True." self.assertFalse(check_cert_key('hello', 'hello'), 'Accepted invalid file paths.') self.assertFalse(check_cert_key(TEST_CERT_FILE, 'k'), 'Accepted invalid key path.') self.assertFalse(check_cert_key('c', TEST_KEY_FILE), 'Accepted invalid cert path.')
Check invalid file paths don't return True.
test/test_crypto.py
test_check_cert_key_invalid_paths
RoseECooper/ssm
7
python
def test_check_cert_key_invalid_paths(self): self.assertFalse(check_cert_key('hello', 'hello'), 'Accepted invalid file paths.') self.assertFalse(check_cert_key(TEST_CERT_FILE, 'k'), 'Accepted invalid key path.') self.assertFalse(check_cert_key('c', TEST_KEY_FILE), 'Accepted invalid cert path.')
def test_check_cert_key_invalid_paths(self): self.assertFalse(check_cert_key('hello', 'hello'), 'Accepted invalid file paths.') self.assertFalse(check_cert_key(TEST_CERT_FILE, 'k'), 'Accepted invalid key path.') self.assertFalse(check_cert_key('c', TEST_KEY_FILE), 'Accepted invalid cert path.')<|docstring|>Check invalid file paths don't return True.<|endoftext|>
bae084d842425dc54d16883ed7cad79413019f1bd88c21cf855563528764d901
def test_check_cert_key_arg_order(self): "Check incorrect order of cert and key path args doesn't succeed." self.assertFalse(check_cert_key(TEST_CERT_FILE, TEST_CERT_FILE), 'Accepted certificate as key.') self.assertFalse(check_cert_key(TEST_KEY_FILE, TEST_KEY_FILE), 'Accepted key as cert.') self.assertFalse(check_cert_key(TEST_KEY_FILE, TEST_CERT_FILE), 'Accepted key and cert wrong way round.')
Check incorrect order of cert and key path args doesn't succeed.
test/test_crypto.py
test_check_cert_key_arg_order
RoseECooper/ssm
7
python
def test_check_cert_key_arg_order(self): self.assertFalse(check_cert_key(TEST_CERT_FILE, TEST_CERT_FILE), 'Accepted certificate as key.') self.assertFalse(check_cert_key(TEST_KEY_FILE, TEST_KEY_FILE), 'Accepted key as cert.') self.assertFalse(check_cert_key(TEST_KEY_FILE, TEST_CERT_FILE), 'Accepted key and cert wrong way round.')
def test_check_cert_key_arg_order(self): self.assertFalse(check_cert_key(TEST_CERT_FILE, TEST_CERT_FILE), 'Accepted certificate as key.') self.assertFalse(check_cert_key(TEST_KEY_FILE, TEST_KEY_FILE), 'Accepted key as cert.') self.assertFalse(check_cert_key(TEST_KEY_FILE, TEST_CERT_FILE), 'Accepted key and cert wrong way round.')<|docstring|>Check incorrect order of cert and key path args doesn't succeed.<|endoftext|>
0a8b65565c0b602a5c5285dbec0ddfe899a432709367db9df5dfae2808f4a70f
def test_check_cert_key_invalid_files(self): 'Check behaviour with an invalid cert or key file.' with tempfile.NamedTemporaryFile() as tmp: self.assertFalse(check_cert_key(tmp.name, TEST_KEY_FILE), 'Accepted invalid cert file.') self.assertFalse(check_cert_key(TEST_CERT_FILE, tmp.name), 'Accepted invalid key file.')
Check behaviour with an invalid cert or key file.
test/test_crypto.py
test_check_cert_key_invalid_files
RoseECooper/ssm
7
python
def test_check_cert_key_invalid_files(self): with tempfile.NamedTemporaryFile() as tmp: self.assertFalse(check_cert_key(tmp.name, TEST_KEY_FILE), 'Accepted invalid cert file.') self.assertFalse(check_cert_key(TEST_CERT_FILE, tmp.name), 'Accepted invalid key file.')
def test_check_cert_key_invalid_files(self): with tempfile.NamedTemporaryFile() as tmp: self.assertFalse(check_cert_key(tmp.name, TEST_KEY_FILE), 'Accepted invalid cert file.') self.assertFalse(check_cert_key(TEST_CERT_FILE, tmp.name), 'Accepted invalid key file.')<|docstring|>Check behaviour with an invalid cert or key file.<|endoftext|>
7b84a1496fcd35531aaed9c126bb0a2112b0221cc976a4805271d46e19ec973d
def test_sign(self): "\n I haven't found a good way to test this yet. Each time you sign a\n message, the output has a random element, so you can't compare strings.\n " signed = sign(MSG, TEST_CERT_FILE, TEST_KEY_FILE) if ('MIME-Version' not in signed): self.fail("Didn't get MIME message when signing.") if (MSG not in signed): self.fail('The plaintext should be included in the signed message.') (retrieved_msg, retrieved_dn) = verify(signed, TEST_CA_DIR, False) if (not (retrieved_dn == TEST_CERT_DN)): self.fail("The DN of the verified message didn't match the cert.") if (not (retrieved_msg == MSG)): self.fail("The verified message didn't match the original.")
I haven't found a good way to test this yet. Each time you sign a message, the output has a random element, so you can't compare strings.
test/test_crypto.py
test_sign
RoseECooper/ssm
7
python
def test_sign(self): "\n I haven't found a good way to test this yet. Each time you sign a\n message, the output has a random element, so you can't compare strings.\n " signed = sign(MSG, TEST_CERT_FILE, TEST_KEY_FILE) if ('MIME-Version' not in signed): self.fail("Didn't get MIME message when signing.") if (MSG not in signed): self.fail('The plaintext should be included in the signed message.') (retrieved_msg, retrieved_dn) = verify(signed, TEST_CA_DIR, False) if (not (retrieved_dn == TEST_CERT_DN)): self.fail("The DN of the verified message didn't match the cert.") if (not (retrieved_msg == MSG)): self.fail("The verified message didn't match the original.")
def test_sign(self): "\n I haven't found a good way to test this yet. Each time you sign a\n message, the output has a random element, so you can't compare strings.\n " signed = sign(MSG, TEST_CERT_FILE, TEST_KEY_FILE) if ('MIME-Version' not in signed): self.fail("Didn't get MIME message when signing.") if (MSG not in signed): self.fail('The plaintext should be included in the signed message.') (retrieved_msg, retrieved_dn) = verify(signed, TEST_CA_DIR, False) if (not (retrieved_dn == TEST_CERT_DN)): self.fail("The DN of the verified message didn't match the cert.") if (not (retrieved_msg == MSG)): self.fail("The verified message didn't match the original.")<|docstring|>I haven't found a good way to test this yet. Each time you sign a message, the output has a random element, so you can't compare strings.<|endoftext|>
af342476d6c976ce0e848a7dfcbff4ad075b1314ca008e81825626ba992be197
def test_get_certificate_subject(self): '\n Check that the correct DN is extracted from the cert.\n Check that incorrect input gives an appropriate error.\n ' with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() dn = get_certificate_subject(cert_string) if (not (dn == TEST_CERT_DN)): self.fail("Didn't retrieve correct DN from cert.") try: subj = get_certificate_subject('Rubbish') self.fail(('Returned %s as subject from empty string.' % subj)) except CryptoException: pass try: subj = get_certificate_subject('') self.fail(('Returned %s as subject from empty string.' % subj)) except CryptoException: pass
Check that the correct DN is extracted from the cert. Check that incorrect input gives an appropriate error.
test/test_crypto.py
test_get_certificate_subject
RoseECooper/ssm
7
python
def test_get_certificate_subject(self): '\n        Check that the correct DN is extracted from the cert.\n        Check that incorrect input gives an appropriate error.\n        ' with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() dn = get_certificate_subject(cert_string) if (not (dn == TEST_CERT_DN)): self.fail("Didn't retrieve correct DN from cert.") try: subj = get_certificate_subject('Rubbish') self.fail(('Returned %s as subject from empty string.' % subj)) except CryptoException: pass try: subj = get_certificate_subject('') self.fail(('Returned %s as subject from empty string.' % subj)) except CryptoException: pass
def test_get_certificate_subject(self): '\n        Check that the correct DN is extracted from the cert.\n        Check that incorrect input gives an appropriate error.\n        ' with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() dn = get_certificate_subject(cert_string) if (not (dn == TEST_CERT_DN)): self.fail("Didn't retrieve correct DN from cert.") try: subj = get_certificate_subject('Rubbish') self.fail(('Returned %s as subject from empty string.' % subj)) except CryptoException: pass try: subj = get_certificate_subject('') self.fail(('Returned %s as subject from empty string.' % subj)) except CryptoException: pass<|docstring|>Check that the correct DN is extracted from the cert. Check that incorrect input gives an appropriate error.<|endoftext|>
6d1f1bc63464ae8be0c5625144093f408aecd8f1b4c2f12507bd014b05b8e005
def test_get_signer_cert(self): '\n Check that the certificate retrieved from the signed message\n matches the certificate used to sign it.\n ' signed_msg = sign(MSG, TEST_CERT_FILE, TEST_KEY_FILE) cert = get_signer_cert(signed_msg) cert = cert[cert.find('-----BEGIN'):] with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() if (cert.strip() != cert_string.strip()): self.fail('Certificate retrieved from signature does not match certificate used to sign.')
Check that the certificate retrieved from the signed message matches the certificate used to sign it.
test/test_crypto.py
test_get_signer_cert
RoseECooper/ssm
7
python
def test_get_signer_cert(self): '\n Check that the certificate retrieved from the signed message\n matches the certificate used to sign it.\n ' signed_msg = sign(MSG, TEST_CERT_FILE, TEST_KEY_FILE) cert = get_signer_cert(signed_msg) cert = cert[cert.find('-----BEGIN'):] with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() if (cert.strip() != cert_string.strip()): self.fail('Certificate retrieved from signature does not match certificate used to sign.')
def test_get_signer_cert(self): '\n Check that the certificate retrieved from the signed message\n matches the certificate used to sign it.\n ' signed_msg = sign(MSG, TEST_CERT_FILE, TEST_KEY_FILE) cert = get_signer_cert(signed_msg) cert = cert[cert.find('-----BEGIN'):] with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() if (cert.strip() != cert_string.strip()): self.fail('Certificate retrieved from signature does not match certificate used to sign.')<|docstring|>Check that the certificate retrieved from the signed message matches the certificate used to sign it.<|endoftext|>
a1d6320fd8c37e5b2f9f1daffbf4b568a7f886f081eca8868cbc0e0ce4e5a92b
def test_encrypt(self): '\n Not a correct test yet.\n ' encrypted = encrypt(MSG, TEST_CERT_FILE) if ('MIME-Version' not in encrypted): self.fail('Encrypted message is not MIME') decrypted = decrypt(encrypted, TEST_CERT_FILE, TEST_KEY_FILE) if (decrypted != MSG): self.fail("Encrypted message wasn't decrypted successfully.") try: encrypted = encrypt(MSG, TEST_CERT_FILE, 'aes1024') except CryptoException: pass
Not a correct test yet.
test/test_crypto.py
test_encrypt
RoseECooper/ssm
7
python
def test_encrypt(self): '\n \n ' encrypted = encrypt(MSG, TEST_CERT_FILE) if ('MIME-Version' not in encrypted): self.fail('Encrypted message is not MIME') decrypted = decrypt(encrypted, TEST_CERT_FILE, TEST_KEY_FILE) if (decrypted != MSG): self.fail("Encrypted message wasn't decrypted successfully.") try: encrypted = encrypt(MSG, TEST_CERT_FILE, 'aes1024') except CryptoException: pass
def test_encrypt(self): '\n \n ' encrypted = encrypt(MSG, TEST_CERT_FILE) if ('MIME-Version' not in encrypted): self.fail('Encrypted message is not MIME') decrypted = decrypt(encrypted, TEST_CERT_FILE, TEST_KEY_FILE) if (decrypted != MSG): self.fail("Encrypted message wasn't decrypted successfully.") try: encrypted = encrypt(MSG, TEST_CERT_FILE, 'aes1024') except CryptoException: pass<|docstring|>Not a correct test yet.<|endoftext|>
a9bef7b9489fefe66eb1bd6c311596cfa1a57959acaf421281f22af9a92f1297
def test_decrypt(self): '\n Check that the encrypted message can be decrypted and returns the\n original message.\n ' encrypted = encrypt(MSG, TEST_CERT_FILE) decrypted = decrypt(encrypted, TEST_CERT_FILE, TEST_KEY_FILE) if (decrypted.strip() != MSG): self.fail('Failed to decrypt message.')
Check that the encrypted message can be decrypted and returns the original message.
test/test_crypto.py
test_decrypt
RoseECooper/ssm
7
python
def test_decrypt(self): '\n Check that the encrypted message can be decrypted and returns the\n original message.\n ' encrypted = encrypt(MSG, TEST_CERT_FILE) decrypted = decrypt(encrypted, TEST_CERT_FILE, TEST_KEY_FILE) if (decrypted.strip() != MSG): self.fail('Failed to decrypt message.')
def test_decrypt(self): '\n Check that the encrypted message can be decrypted and returns the\n original message.\n ' encrypted = encrypt(MSG, TEST_CERT_FILE) decrypted = decrypt(encrypted, TEST_CERT_FILE, TEST_KEY_FILE) if (decrypted.strip() != MSG): self.fail('Failed to decrypt message.')<|docstring|>Check that the encrypted message can be decrypted and returns the original message.<|endoftext|>
3ea60f8cf507738f51e03b224252b88a25be42aca57c72c1915c2bb78ff4419e
def test_verify_cert(self): "\n Check that the test certificate is verified against itself, and that\n it doesn't verify without the correct CA directory. Check that a\n nonsense string isn't verified.\n\n I can't check the CRLs of a self-signed certificate easily.\n " with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() if (not verify_cert(cert_string, TEST_CA_DIR, False)): self.fail('The self signed certificate should validate againstitself in a CA directory.') if verify_cert(cert_string, '/var/tmp', False): self.fail("The verify method isn't checking the CA dir correctly.") if verify_cert('bloblo', TEST_CA_DIR, False): self.fail('Nonsense successfully verified.') if verify_cert(cert_string, TEST_CA_DIR, True): self.fail(('The self-signed certificate should not be verified ' + 'if CRLs are checked.')) try: if verify_cert(None, TEST_CA_DIR, False): self.fail('Verified None rather than certificate string.') except CryptoException: pass
Check that the test certificate is verified against itself, and that it doesn't verify without the correct CA directory. Check that a nonsense string isn't verified. I can't check the CRLs of a self-signed certificate easily.
test/test_crypto.py
test_verify_cert
RoseECooper/ssm
7
python
def test_verify_cert(self): "\n Check that the test certificate is verified against itself, and that\n it doesn't verify without the correct CA directory. Check that a\n nonsense string isn't verified.\n\n I can't check the CRLs of a self-signed certificate easily.\n " with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() if (not verify_cert(cert_string, TEST_CA_DIR, False)): self.fail('The self signed certificate should validate againstitself in a CA directory.') if verify_cert(cert_string, '/var/tmp', False): self.fail("The verify method isn't checking the CA dir correctly.") if verify_cert('bloblo', TEST_CA_DIR, False): self.fail('Nonsense successfully verified.') if verify_cert(cert_string, TEST_CA_DIR, True): self.fail(('The self-signed certificate should not be verified ' + 'if CRLs are checked.')) try: if verify_cert(None, TEST_CA_DIR, False): self.fail('Verified None rather than certificate string.') except CryptoException: pass
def test_verify_cert(self): "\n Check that the test certificate is verified against itself, and that\n it doesn't verify without the correct CA directory. Check that a\n nonsense string isn't verified.\n\n I can't check the CRLs of a self-signed certificate easily.\n " with open(TEST_CERT_FILE, 'r') as test_cert: cert_string = test_cert.read() if (not verify_cert(cert_string, TEST_CA_DIR, False)): self.fail('The self signed certificate should validate againstitself in a CA directory.') if verify_cert(cert_string, '/var/tmp', False): self.fail("The verify method isn't checking the CA dir correctly.") if verify_cert('bloblo', TEST_CA_DIR, False): self.fail('Nonsense successfully verified.') if verify_cert(cert_string, TEST_CA_DIR, True): self.fail(('The self-signed certificate should not be verified ' + 'if CRLs are checked.')) try: if verify_cert(None, TEST_CA_DIR, False): self.fail('Verified None rather than certificate string.') except CryptoException: pass<|docstring|>Check that the test certificate is verified against itself, and that it doesn't verify without the correct CA directory. Check that a nonsense string isn't verified. I can't check the CRLs of a self-signed certificate easily.<|endoftext|>
c2c177d5111b48797a1e654011902e9cd39406e51a76413d03dd8b1dd0d489f0
def test_message_tampering(self): 'Test that a tampered message is not accepted as valid.' signed_message = sign(MSG, TEST_CERT_FILE, TEST_KEY_FILE) tampered_message = signed_message.replace(MSG, 'Spam') (verified_message, verified_signer) = verify(signed_message, TEST_CA_DIR, False) self.assertEqual(verified_message, MSG) self.assertEqual(verified_signer, TEST_CERT_DN) self.assertRaises(CryptoException, verify, tampered_message, TEST_CA_DIR, False)
Test that a tampered message is not accepted as valid.
test/test_crypto.py
test_message_tampering
RoseECooper/ssm
7
python
def test_message_tampering(self): signed_message = sign(MSG, TEST_CERT_FILE, TEST_KEY_FILE) tampered_message = signed_message.replace(MSG, 'Spam') (verified_message, verified_signer) = verify(signed_message, TEST_CA_DIR, False) self.assertEqual(verified_message, MSG) self.assertEqual(verified_signer, TEST_CERT_DN) self.assertRaises(CryptoException, verify, tampered_message, TEST_CA_DIR, False)
def test_message_tampering(self): signed_message = sign(MSG, TEST_CERT_FILE, TEST_KEY_FILE) tampered_message = signed_message.replace(MSG, 'Spam') (verified_message, verified_signer) = verify(signed_message, TEST_CA_DIR, False) self.assertEqual(verified_message, MSG) self.assertEqual(verified_signer, TEST_CERT_DN) self.assertRaises(CryptoException, verify, tampered_message, TEST_CA_DIR, False)<|docstring|>Test that a tampered message is not accepted as valid.<|endoftext|>
f508c3e71ab5a335bad88831d1cd2254ba50f46914b3ab09e497f79307e5be27
def chunks(l, n): 'Yield successive n-sized chunks from l.' for i in range(0, len(l), n): (yield l[i:(i + n)])
Yield successive n-sized chunks from l.
yield.py
chunks
robertbyers1111/python
0
python
def chunks(l, n): for i in range(0, len(l), n): (yield l[i:(i + n)])
def chunks(l, n): for i in range(0, len(l), n): (yield l[i:(i + n)])<|docstring|>Yield successive n-sized chunks from l.<|endoftext|>
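A quick usage sketch for the chunks generator above; the data is illustrative only:

data = [1, 2, 3, 4, 5, 6, 7]
# The final chunk is shorter when len(data) is not a multiple of n.
print(list(chunks(data, 3)))  # [[1, 2, 3], [4, 5, 6], [7]]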
b87aad031a924ae68dac9c6216a7a4fb5bcab126af6b9a56f09bcdc1edc514b7
def get_min_realtime(stock_code): '\n ่Žทๅ–่‚ก็ฅจ1ๅˆ†้’Ÿๅฎžๆ—ถ่กŒๆƒ…ใ€‚\n \n Parameters\n ----------\n stock_code\n ่‚ก็ฅจไปฃ็ \n \n Returns\n -------\n DataFrame\n ่‹ฅๅฝ“ๆ—ฅๆ— ๆ•ฐๆฎ,ๅˆ™่ฟ”ๅ›žNone\n ' r = redis.Redis(connection_pool=pool) data = r.hgetall(':'.join(['KLine', '1Min', stock_code, dt.datetime.today().strftime('%Y%m%d')])) df = pd.DataFrame.from_records([json.loads(val) for (key, val) in data.items()]) return df.sort_values('Time', ascending=True)
่Žทๅ–่‚ก็ฅจ1ๅˆ†้’Ÿๅฎžๆ—ถ่กŒๆƒ…ใ€‚ Parameters ---------- stock_code ่‚ก็ฅจไปฃ็  Returns ------- DataFrame ่‹ฅๅฝ“ๆ—ฅๆ— ๆ•ฐๆฎ,ๅˆ™่ฟ”ๅ›žNone
api_realtime.py
get_min_realtime
challenger-zpp/dataflow
0
python
def get_min_realtime(stock_code): '\n ่Žทๅ–่‚ก็ฅจ1ๅˆ†้’Ÿๅฎžๆ—ถ่กŒๆƒ…ใ€‚\n \n Parameters\n ----------\n stock_code\n ่‚ก็ฅจไปฃ็ \n \n Returns\n -------\n DataFrame\n ่‹ฅๅฝ“ๆ—ฅๆ— ๆ•ฐๆฎ,ๅˆ™่ฟ”ๅ›žNone\n ' r = redis.Redis(connection_pool=pool) data = r.hgetall(':'.join(['KLine', '1Min', stock_code, dt.datetime.today().strftime('%Y%m%d')])) df = pd.DataFrame.from_records([json.loads(val) for (key, val) in data.items()]) return df.sort_values('Time', ascending=True)
def get_min_realtime(stock_code): '\n ่Žทๅ–่‚ก็ฅจ1ๅˆ†้’Ÿๅฎžๆ—ถ่กŒๆƒ…ใ€‚\n \n Parameters\n ----------\n stock_code\n ่‚ก็ฅจไปฃ็ \n \n Returns\n -------\n DataFrame\n ่‹ฅๅฝ“ๆ—ฅๆ— ๆ•ฐๆฎ,ๅˆ™่ฟ”ๅ›žNone\n ' r = redis.Redis(connection_pool=pool) data = r.hgetall(':'.join(['KLine', '1Min', stock_code, dt.datetime.today().strftime('%Y%m%d')])) df = pd.DataFrame.from_records([json.loads(val) for (key, val) in data.items()]) return df.sort_values('Time', ascending=True)<|docstring|>่Žทๅ–่‚ก็ฅจ1ๅˆ†้’Ÿๅฎžๆ—ถ่กŒๆƒ…ใ€‚ Parameters ---------- stock_code ่‚ก็ฅจไปฃ็  Returns ------- DataFrame ่‹ฅๅฝ“ๆ—ฅๆ— ๆ•ฐๆฎ,ๅˆ™่ฟ”ๅ›žNone<|endoftext|>
fd6ce0900221b5b19dc4e0ca3c23865f3af4c44e6f929ff20dade41cb04abf47
def read_dataset(path, sep): '\n path: path to dataset file\n return: Dataframe -> pandas type\n ' return pd.read_csv(path, sep=sep)
path: path to dataset file return: Dataframe -> pandas type
brasileirao/main.py
read_dataset
Vitoraugustoliveira/python-tutorial
0
python
def read_dataset(path, sep): '\n path: path to dataset file\n return: Dataframe -> pandas type\n ' return pd.read_csv(path, sep=sep)
def read_dataset(path, sep): '\n path: path to dataset file\n return: Dataframe -> pandas type\n ' return pd.read_csv(path, sep=sep)<|docstring|>path: path to dataset file return: Dataframe -> pandas type<|endoftext|>
9738670fa6edb27cc344e841fe056f41d825a0ae24d23dba04521c503ce16b5b
def try_add(self, item): '\n Tries to add an item to the inventory.\n\n Returns True on success otherwise False.\n ' if (not self.has_room_for_item(item)): return False else: if item.has('stacker'): self.try_stack_item(item) if (item.stacker.size == 0): item.mover.try_remove_from_dungeon() return True self.add_item_no_stack(item) return True
Tries to add an item to the inventory. Returns True on success otherwise False.
inventory.py
try_add
co/TheLastRogue
8
python
def try_add(self, item): '\n Tries to add an item to the inventory.\n\n Returns True on success otherwise False.\n ' if (not self.has_room_for_item(item)): return False else: if item.has('stacker'): self.try_stack_item(item) if (item.stacker.size == 0): item.mover.try_remove_from_dungeon() return True self.add_item_no_stack(item) return True
def try_add(self, item): '\n Tries to add an item to the inventory.\n\n Returns True on success otherwise False.\n ' if (not self.has_room_for_item(item)): return False else: if item.has('stacker'): self.try_stack_item(item) if (item.stacker.size == 0): item.mover.try_remove_from_dungeon() return True self.add_item_no_stack(item) return True<|docstring|>Tries to add an item to the inventory. Returns True on success otherwise False.<|endoftext|>
039026c80cfda0eadf265d5b62038481b987eed76ec9c7469baa67d9ecae9056
def has_room_for_item(self, other_item): '\n Returns true if the inventory has room for another item.\n ' stack_successful = False if other_item.has('stacker'): stack_successful = self.can_stack_new_item(other_item) return (stack_successful or ((len(self._items) + 1) <= self._item_capacity))
Returns true if the inventory has room for another item.
inventory.py
has_room_for_item
co/TheLastRogue
8
python
def has_room_for_item(self, other_item): '\n \n ' stack_successful = False if other_item.has('stacker'): stack_successful = self.can_stack_new_item(other_item) return (stack_successful or ((len(self._items) + 1) <= self._item_capacity))
def has_room_for_item(self, other_item): '\n \n ' stack_successful = False if other_item.has('stacker'): stack_successful = self.can_stack_new_item(other_item) return (stack_successful or ((len(self._items) + 1) <= self._item_capacity))<|docstring|>Returns true if the inventory has room for another item.<|endoftext|>
a32a34750268d19831c517d785831e74e887018948c44ed28d195a36970bd47c
def can_drop_item(self, item): '\n Returns true if it is a legal action to drop the item.\n ' return item.mover.can_move(self.parent.position, self.parent.dungeon_level)
Returns true if it is a legal action to drop the item.
inventory.py
can_drop_item
co/TheLastRogue
8
python
def can_drop_item(self, item): '\n \n ' return item.mover.can_move(self.parent.position, self.parent.dungeon_level)
def can_drop_item(self, item): '\n \n ' return item.mover.can_move(self.parent.position, self.parent.dungeon_level)<|docstring|>Returns true if it is a legal action to drop the item.<|endoftext|>
2bbc9fcf807126356e094de5add0fe1543d1eb41f39dc6427f9978c46ac9c574
def try_drop_item(self, item): '\n Tries to drop an item to the ground.\n\n Returns True on success otherwise False.\n ' drop_successful = item.mover.try_move(self.parent.position.value, self.parent.dungeon_level.value) if drop_successful: self.remove_item(item) return drop_successful
Tries to drop an item to the ground. Returns True on success otherwise False.
inventory.py
try_drop_item
co/TheLastRogue
8
python
def try_drop_item(self, item): '\n Tries to drop an item to the ground.\n\n Returns True on success otherwise False.\n ' drop_successful = item.mover.try_move(self.parent.position.value, self.parent.dungeon_level.value) if drop_successful: self.remove_item(item) return drop_successful
def try_drop_item(self, item): '\n Tries to drop an item to the ground.\n\n Returns True on success otherwise False.\n ' drop_successful = item.mover.try_move(self.parent.position.value, self.parent.dungeon_level.value) if drop_successful: self.remove_item(item) return drop_successful<|docstring|>Tries to drop an item to the ground. Returns True on success otherwise False.<|endoftext|>
92241cf18becd105eda0227a8e9bbc0b2cb7c1f2d0e1db576c281318ec437f19
def remove_item(self, item): '\n Removes item from the inventory.\n ' self._items.remove(item)
Removes item from the inventory.
inventory.py
remove_item
co/TheLastRogue
8
python
def remove_item(self, item): '\n \n ' self._items.remove(item)
def remove_item(self, item): '\n \n ' self._items.remove(item)<|docstring|>Removes item from the inventory.<|endoftext|>
c9e16c19db69f2a9e854a3621e0802184348cb1b92a1578972933b9cbaee9e7a
def remove_one_item_from_stack(self, item): '\n Removes one instance of an item from the inventory.\n\n Works like remove_item but does not remove an entire stack of items.\n ' if item.has('stacker'): item.stacker.size -= 1 if (item.stacker.size <= 0): self._items.remove(item) else: self._items.remove(item)
Removes one instance of an item from the inventory. Works like remove_item but does not remove an entire stack of items.
inventory.py
remove_one_item_from_stack
co/TheLastRogue
8
python
def remove_one_item_from_stack(self, item): '\n Removes one instance of an item from the inventory.\n\n Works like remove_item but does not remove an entire stack of items.\n ' if item.has('stacker'): item.stacker.size -= 1 if (item.stacker.size <= 0): self._items.remove(item) else: self._items.remove(item)
def remove_one_item_from_stack(self, item): '\n Removes one instance of an item from the inventory.\n\n Works like remove_item but does not remove an entire stack of items.\n ' if item.has('stacker'): item.stacker.size -= 1 if (item.stacker.size <= 0): self._items.remove(item) else: self._items.remove(item)<|docstring|>Removes one instance of an item from the inventory. Works like remove_item but does not remove an entire stack of items.<|endoftext|>
b97c16ec4e3600c87858c052d98763f0dff32600822d156d13b6fdbaa1557379
def has_item(self, item): '\n Returns true if the item instance is in the inventory, false otherwise.\n ' return (item in self._items)
Returns true if the item instance is in the inventory, false otherwise.
inventory.py
has_item
co/TheLastRogue
8
python
def has_item(self, item): '\n \n ' return (item in self._items)
def has_item(self, item): '\n \n ' return (item in self._items)<|docstring|>Returns true if the item instance is in the inventory, false otherwise.<|endoftext|>
bf1afa36923152ba399377bcaebc5d20059f76b33aaa4d120c3836a0a1030525
def has_item_of_type(self, item_type): '\n Returns true if the item instance is in the inventory, false otherwise.\n ' return any([item for item in self._items if (item.item_type.value == item_type)])
Returns true if the item instance is in the inventory, false otherwise.
inventory.py
has_item_of_type
co/TheLastRogue
8
python
def has_item_of_type(self, item_type): '\n \n ' return any([item for item in self._items if (item.item_type.value == item_type)])
def has_item_of_type(self, item_type): '\n \n ' return any([item for item in self._items if (item.item_type.value == item_type)])<|docstring|>Returns true if the item instance is in the inventory, false otherwise.<|endoftext|>
bde5e4e4f3f5a2fbece43a37428a5a82c50c1e0ab89423a705da6d726a18335b
def is_empty(self): '\n Returns true the inventory is empty, false otherwise.\n ' return (len(self._items) <= 0)
Returns true the inventory is empty, false otherwise.
inventory.py
is_empty
co/TheLastRogue
8
python
def is_empty(self): '\n \n ' return (len(self._items) <= 0)
def is_empty(self): '\n \n ' return (len(self._items) <= 0)<|docstring|>Returns true the inventory is empty, false otherwise.<|endoftext|>
b5dc224d073a6155fb83137a6c58efa61da6db85a343ffe678d980169231642f
def items_of_equipment_type(self, type_): '\n Returns a list of all items in the inventory of the given type.\n ' return [item for item in self._items if (item.has('equipment_type') and (item.equipment_type.value == type_))]
Returns a list of all items in the inventory of the given type.
inventory.py
items_of_equipment_type
co/TheLastRogue
8
python
def items_of_equipment_type(self, type_): '\n \n ' return [item for item in self._items if (item.has('equipment_type') and (item.equipment_type.value == type_))]
def items_of_equipment_type(self, type_): '\n \n ' return [item for item in self._items if (item.has('equipment_type') and (item.equipment_type.value == type_))]<|docstring|>Returns a list of all items in the inventory of the given type.<|endoftext|>
279f9c7951774ff269f5de954ede99e28330ee41d2092285caa4303f42968a8c
def __init__(self, iterator): '\n Initialize your data structure here.\n :type iterator: Iterator\n ' self.iter = iterator self.currentPeak = (self.iter.next() if self.iter.hasNext() else None)
Initialize your data structure here. :type iterator: Iterator
leetcode.com/python/284_Peeking_Iterator.py
__init__
its-sushant/coding-interview-gym
713
python
def __init__(self, iterator): '\n Initialize your data structure here.\n :type iterator: Iterator\n ' self.iter = iterator self.currentPeak = (self.iter.next() if self.iter.hasNext() else None)
def __init__(self, iterator): '\n Initialize your data structure here.\n :type iterator: Iterator\n ' self.iter = iterator self.currentPeak = (self.iter.next() if self.iter.hasNext() else None)<|docstring|>Initialize your data structure here. :type iterator: Iterator<|endoftext|>
96f1d91cb0f4aad94f7c43691ab2f8d919642a60b8d10811289a01c9418f2b53
def peek(self): '\n Returns the next element in the iteration without advancing the iterator.\n :rtype: int\n ' return self.currentPeak
Returns the next element in the iteration without advancing the iterator. :rtype: int
leetcode.com/python/284_Peeking_Iterator.py
peek
its-sushant/coding-interview-gym
713
python
def peek(self): '\n Returns the next element in the iteration without advancing the iterator.\n :rtype: int\n ' return self.currentPeak
def peek(self): '\n Returns the next element in the iteration without advancing the iterator.\n :rtype: int\n ' return self.currentPeak<|docstring|>Returns the next element in the iteration without advancing the iterator. :rtype: int<|endoftext|>
4d30823897eaf5e8ac5c091591488061b227fb1c2b8e44c30f0df8607d6c7607
def next(self): '\n :rtype: int\n ' returnValue = self.currentPeak self.currentPeak = (self.iter.next() if self.iter.hasNext() else None) return returnValue
:rtype: int
leetcode.com/python/284_Peeking_Iterator.py
next
its-sushant/coding-interview-gym
713
python
def next(self): '\n \n ' returnValue = self.currentPeak self.currentPeak = (self.iter.next() if self.iter.hasNext() else None) return returnValue
def next(self): '\n \n ' returnValue = self.currentPeak self.currentPeak = (self.iter.next() if self.iter.hasNext() else None) return returnValue<|docstring|>:rtype: int<|endoftext|>
73f406dfbf7e7f1e640908c1ac6009ed8b49fce261a37c2ea26d6a357f3e3a3b
def hasNext(self): '\n :rtype: bool\n ' return (True if self.currentPeak else False)
:rtype: bool
leetcode.com/python/284_Peeking_Iterator.py
hasNext
its-sushant/coding-interview-gym
713
python
def hasNext(self): '\n \n ' return (True if self.currentPeak else False)
def hasNext(self): '\n \n ' return (True if self.currentPeak else False)<|docstring|>:rtype: bool<|endoftext|>
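The four records above together form the PeekingIterator class from 284_Peeking_Iterator.py; a minimal usage sketch follows, with a hypothetical list-backed stand-in for the LeetCode-provided Iterator interface. Note that hasNext tests the truthiness of the cached value, so a stream containing 0 or another falsy element would be reported as exhausted early.

class Iterator:
    # Hypothetical stand-in for the LeetCode-provided Iterator.
    def __init__(self, nums):
        self.nums, self.i = nums, 0
    def hasNext(self):
        return self.i < len(self.nums)
    def next(self):
        val = self.nums[self.i]
        self.i += 1
        return val

it = PeekingIterator(Iterator([1, 2, 3]))
print(it.peek())     # 1, without advancing
print(it.next())     # 1
print(it.next())     # 2
print(it.hasNext())  # True
print(it.next())     # 3
print(it.hasNext())  # False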
ecec87208df3624d0bc8374b3f8658c5e022376345f8ca1a1ecff90e9efd0c15
def build(self): 'Avoid warning on build step' pass
Avoid warning on build step
conanfile.py
build
AndrewGaspar/variant-lite
218
python
def build(self): pass
def build(self): pass<|docstring|>Avoid warning on build step<|endoftext|>
3048d78eb3558c6ffc8aa83818d746210f1278312aa53d5d5abf4f15eb6377ad
def package(self): 'Run CMake install' cmake = CMake(self) cmake.definitions['VARIANT_LITE_OPT_BUILD_TESTS'] = 'OFF' cmake.definitions['VARIANT_LITE_OPT_BUILD_EXAMPLES'] = 'OFF' cmake.configure() cmake.install()
Run CMake install
conanfile.py
package
AndrewGaspar/variant-lite
218
python
def package(self): cmake = CMake(self) cmake.definitions['VARIANT_LITE_OPT_BUILD_TESTS'] = 'OFF' cmake.definitions['VARIANT_LITE_OPT_BUILD_EXAMPLES'] = 'OFF' cmake.configure() cmake.install()
def package(self): cmake = CMake(self) cmake.definitions['VARIANT_LITE_OPT_BUILD_TESTS'] = 'OFF' cmake.definitions['VARIANT_LITE_OPT_BUILD_EXAMPLES'] = 'OFF' cmake.configure() cmake.install()<|docstring|>Run CMake install<|endoftext|>
265bfe7101e704385e0812919375ae3cc629c46645963b54e66b9082b3994af2
def zigzagLevelOrder(self, root): '\n :type root: TreeNode\n :rtype: List[List[int]]\n ' if (not root): return [] (currnode, nextnode, res) = ([root], [], []) (start, end, count) = (0, 0, 1) reverse = False while currnode: (start, end) = (0, count) (level, count) = ([], 0) for i in range(start, end): if currnode[i].left: nextnode.append(currnode[i].left) count += 1 if currnode[i].right: nextnode.append(currnode[i].right) count += 1 level.append(currnode[i].val) if reverse: level.reverse() reverse = False elif (not reverse): reverse = True res.append(level) (currnode, nextnode) = (nextnode, []) return res
:type root: TreeNode :rtype: List[List[int]]
LeetCode/2018-12-29-103-Binary-Tree-Zigzag-Level-Order-Traversal.py
zigzagLevelOrder
HeRuivio/Algorithm
5
python
def zigzagLevelOrder(self, root): '\n :type root: TreeNode\n :rtype: List[List[int]]\n ' if (not root): return [] (currnode, nextnode, res) = ([root], [], []) (start, end, count) = (0, 0, 1) reverse = False while currnode: (start, end) = (0, count) (level, count) = ([], 0) for i in range(start, end): if currnode[i].left: nextnode.append(currnode[i].left) count += 1 if currnode[i].right: nextnode.append(currnode[i].right) count += 1 level.append(currnode[i].val) if reverse: level.reverse() reverse = False elif (not reverse): reverse = True res.append(level) (currnode, nextnode) = (nextnode, []) return res
def zigzagLevelOrder(self, root): '\n :type root: TreeNode\n :rtype: List[List[int]]\n ' if (not root): return [] (currnode, nextnode, res) = ([root], [], []) (start, end, count) = (0, 0, 1) reverse = False while currnode: (start, end) = (0, count) (level, count) = ([], 0) for i in range(start, end): if currnode[i].left: nextnode.append(currnode[i].left) count += 1 if currnode[i].right: nextnode.append(currnode[i].right) count += 1 level.append(currnode[i].val) if reverse: level.reverse() reverse = False elif (not reverse): reverse = True res.append(level) (currnode, nextnode) = (nextnode, []) return res<|docstring|>:type root: TreeNode :rtype: List[List[int]]<|endoftext|>
faf52fb3fbab1c97517bda16b856b9084284a0a31523791edd479f9dd311fde8
def zigzagLevelOrder(self, root): '\n :type root: TreeNode\n :rtype: List[List[int]]\n ' if (not root): return [] (reverse, res, queue) = (False, [], deque()) queue.append(root) while queue: (nums, level) = (len(queue), []) for _ in range(0, nums): current = queue.popleft() if current.left: queue.append(current.left) if current.right: queue.append(current.right) level.append(current.val) if reverse: level.reverse() reverse = False elif (not reverse): reverse = True res.append(level) return res
:type root: TreeNode :rtype: List[List[int]]
LeetCode/2018-12-29-103-Binary-Tree-Zigzag-Level-Order-Traversal.py
zigzagLevelOrder
HeRuivio/Algorithm
5
python
def zigzagLevelOrder(self, root): '\n :type root: TreeNode\n :rtype: List[List[int]]\n ' if (not root): return [] (reverse, res, queue) = (False, [], deque()) queue.append(root) while queue: (nums, level) = (len(queue), []) for _ in range(0, nums): current = queue.popleft() if current.left: queue.append(current.left) if current.right: queue.append(current.right) level.append(current.val) if reverse: level.reverse() reverse = False elif (not reverse): reverse = True res.append(level) return res
def zigzagLevelOrder(self, root): '\n :type root: TreeNode\n :rtype: List[List[int]]\n ' if (not root): return [] (reverse, res, queue) = (False, [], deque()) queue.append(root) while queue: (nums, level) = (len(queue), []) for _ in range(0, nums): current = queue.popleft() if current.left: queue.append(current.left) if current.right: queue.append(current.right) level.append(current.val) if reverse: level.reverse() reverse = False elif (not reverse): reverse = True res.append(level) return res<|docstring|>:type root: TreeNode :rtype: List[List[int]]<|endoftext|>
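Both zigzagLevelOrder records above perform a breadth-first traversal and reverse every other level; a small usage sketch follows, assuming the usual LeetCode TreeNode definition and Solution wrapper class (neither appears in the records):

class TreeNode:
    # Hypothetical TreeNode matching the common LeetCode definition.
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

# Tree:   3
#        / \
#       9  20
#          / \
#         15  7
root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
print(Solution().zigzagLevelOrder(root))  # [[3], [20, 9], [15, 7]]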
c71387057ed6e2af1b6ea9cabce37c587af5fdc04bdb79349a5fcca0e6291e69
def gelu(x): "Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different\n (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) *\n (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n " return ((x * 0.5) * (1.0 + torch.erf((x / math.sqrt(2.0)))))
Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415
src/modules.py
gelu
salesforce/CoSeRec
0
python
def gelu(x): "Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different\n (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) *\n (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n " return ((x * 0.5) * (1.0 + torch.erf((x / math.sqrt(2.0)))))
def gelu(x): "Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different\n (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) *\n (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n " return ((x * 0.5) * (1.0 + torch.erf((x / math.sqrt(2.0)))))<|docstring|>Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415<|endoftext|>
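The gelu record above uses the exact erf form, while its docstring quotes OpenAI GPT's tanh approximation; a short numerical comparison sketch follows (PyTorch assumed available, as in the source module):

import math
import torch

def gelu_exact(x):
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

def gelu_tanh(x):
    # The approximation quoted in the docstring.
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))

x = torch.linspace(-3.0, 3.0, 13)
print(torch.max(torch.abs(gelu_exact(x) - gelu_tanh(x))))  # small; well below 1e-2 on this range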
55f3935f38eccda49c1865b2171bc7e39d386f6cb0ba6f95300db8c397d630ae
def __init__(self, hidden_size, eps=1e-12): 'Construct a layernorm module in the TF style (epsilon inside the square root).\n ' super(LayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps
Construct a layernorm module in the TF style (epsilon inside the square root).
src/modules.py
__init__
salesforce/CoSeRec
0
python
def __init__(self, hidden_size, eps=1e-12): '\n ' super(LayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps
def __init__(self, hidden_size, eps=1e-12): '\n ' super(LayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps<|docstring|>Construct a layernorm module in the TF style (epsilon inside the square root).<|endoftext|>
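Only the constructor appears in the record above; as context for the "epsilon inside the square root" remark, a typical forward pass for this TF-style LayerNorm would look roughly like the following sketch (not taken from the source file):

def forward(self, x):
    # Normalize over the last dimension; eps is added under the square root.
    u = x.mean(-1, keepdim=True)
    s = (x - u).pow(2).mean(-1, keepdim=True)
    x = (x - u) / torch.sqrt(s + self.variance_epsilon)
    return self.weight * x + self.bias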
211d9175278821f7f809c2c2ef18d259b0e9eae2b007a77ce972b356e13ccf0b
def get_child_states(self, tree): '\n Get c and h of all children\n :param tree:\n :return: (tuple)\n child_c: (num_children, 1, mem_dim)\n child_h: (num_children, 1, mem_dim)\n ' if (tree.num_children == 0): child_c = Var(torch.zeros(1, 1, self.mem_dim)) child_h = Var(torch.zeros(1, 1, self.mem_dim)) else: child_c = Var(torch.Tensor(tree.num_children, 1, self.mem_dim)) child_h = Var(torch.Tensor(tree.num_children, 1, self.mem_dim)) for idx in range(tree.num_children): child_c[idx] = tree.children[idx].state[0] child_h[idx] = tree.children[idx].state[1] return (child_c, child_h)
Get c and h of all children :param tree: :return: (tuple) child_c: (num_children, 1, mem_dim) child_h: (num_children, 1, mem_dim)
treelstm/model.py
get_child_states
ram-g-athreya/RNN-Question-Answering
7
python
def get_child_states(self, tree): '\n Get c and h of all children\n :param tree:\n :return: (tuple)\n child_c: (num_children, 1, mem_dim)\n child_h: (num_children, 1, mem_dim)\n ' if (tree.num_children == 0): child_c = Var(torch.zeros(1, 1, self.mem_dim)) child_h = Var(torch.zeros(1, 1, self.mem_dim)) else: child_c = Var(torch.Tensor(tree.num_children, 1, self.mem_dim)) child_h = Var(torch.Tensor(tree.num_children, 1, self.mem_dim)) for idx in range(tree.num_children): child_c[idx] = tree.children[idx].state[0] child_h[idx] = tree.children[idx].state[1] return (child_c, child_h)
def get_child_states(self, tree): '\n Get c and h of all children\n :param tree:\n :return: (tuple)\n child_c: (num_children, 1, mem_dim)\n child_h: (num_children, 1, mem_dim)\n ' if (tree.num_children == 0): child_c = Var(torch.zeros(1, 1, self.mem_dim)) child_h = Var(torch.zeros(1, 1, self.mem_dim)) else: child_c = Var(torch.Tensor(tree.num_children, 1, self.mem_dim)) child_h = Var(torch.Tensor(tree.num_children, 1, self.mem_dim)) for idx in range(tree.num_children): child_c[idx] = tree.children[idx].state[0] child_h[idx] = tree.children[idx].state[1] return (child_c, child_h)<|docstring|>Get c and h of all children :param tree: :return: (tuple) child_c: (num_children, 1, mem_dim) child_h: (num_children, 1, mem_dim)<|endoftext|>
f851b752d7c086bdf8fad119cd38c71ff3625763980e74ccd822a099f6e0c817
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl'], extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False) -> Union[(StructType, DataType)]: '\n The subscription resource is used to define a push-based subscription from a\n server to another system. Once a subscription is registered with the server,\n the server checks every resource that is created or updated, and if the\n resource matches the given criteria, it sends a message on the defined\n "channel" so that another system can take an appropriate action.\n\n\n id: Unique id for the element within a resource (for internal references). This\n may be any string value that does not contain spaces.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the element. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the element and that modifies the understanding of the element\n in which it is contained and/or the understanding of the containing element\'s\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer can define an extension, there is a set of requirements that SHALL\n be met as part of the definition of the extension. Applications processing a\n resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n type: The type of channel to send notifications on.\n\n endpoint: The url that describes the actual end-point to send messages to.\n\n payload: The mime type to send the payload in - either application/fhir+xml, or\n application/fhir+json. If the payload is not present, then there is no payload\n in the notification, just a notification. 
The mime type "text/plain" may also\n be used for Email and SMS subscriptions.\n\n header: Additional headers / information to send as part of the notification.\n\n ' from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.simple_types.url import urlSchema from spark_fhir_schemas.r4.simple_types.code import codeSchema if ((max_recursion_limit and (nesting_list.count('Subscription_Channel') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['Subscription_Channel']) schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('type', StringType(), True), StructField('endpoint', urlSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('payload', codeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('header', ArrayType(StringType()), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema
The subscription resource is used to define a push-based subscription from a server to another system. Once a subscription is registered with the server, the server checks every resource that is created or updated, and if the resource matches the given criteria, it sends a message on the defined "channel" so that another system can take an appropriate action. id: Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. extension: May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. modifierExtension: May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). type: The type of channel to send notifications on. endpoint: The url that describes the actual end-point to send messages to. payload: The mime type to send the payload in - either application/fhir+xml, or application/fhir+json. If the payload is not present, then there is no payload in the notification, just a notification. The mime type "text/plain" may also be used for Email and SMS subscriptions. header: Additional headers / information to send as part of the notification.
spark_fhir_schemas/r4/complex_types/subscription_channel.py
get_schema
imranq2/SparkFhirSchemas
2
python
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl'], extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False) -> Union[(StructType, DataType)]: '\n The subscription resource is used to define a push-based subscription from a\n server to another system. Once a subscription is registered with the server,\n the server checks every resource that is created or updated, and if the\n resource matches the given criteria, it sends a message on the defined\n "channel" so that another system can take an appropriate action.\n\n\n id: Unique id for the element within a resource (for internal references). This\n may be any string value that does not contain spaces.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the element. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the element and that modifies the understanding of the element\n in which it is contained and/or the understanding of the containing element\'s\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer can define an extension, there is a set of requirements that SHALL\n be met as part of the definition of the extension. Applications processing a\n resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n type: The type of channel to send notifications on.\n\n endpoint: The url that describes the actual end-point to send messages to.\n\n payload: The mime type to send the payload in - either application/fhir+xml, or\n application/fhir+json. If the payload is not present, then there is no payload\n in the notification, just a notification. The mime type "text/plain" may also\n be used for Email and SMS subscriptions.\n\n header: Additional headers / information to send as part of the notification.\n\n ' from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.simple_types.url import urlSchema from spark_fhir_schemas.r4.simple_types.code import codeSchema if ((max_recursion_limit and (nesting_list.count('Subscription_Channel') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['Subscription_Channel']) schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('type', StringType(), True), StructField('endpoint', urlSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('payload', codeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('header', ArrayType(StringType()), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl'], extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False) -> Union[(StructType, DataType)]: '\n The subscription resource is used to define a push-based subscription from a\n server to another system. Once a subscription is registered with the server,\n the server checks every resource that is created or updated, and if the\n resource matches the given criteria, it sends a message on the defined\n "channel" so that another system can take an appropriate action.\n\n\n id: Unique id for the element within a resource (for internal references). This\n may be any string value that does not contain spaces.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the element. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the element and that modifies the understanding of the element\n in which it is contained and/or the understanding of the containing element\'s\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer can define an extension, there is a set of requirements that SHALL\n be met as part of the definition of the extension. Applications processing a\n resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n type: The type of channel to send notifications on.\n\n endpoint: The url that describes the actual end-point to send messages to.\n\n payload: The mime type to send the payload in - either application/fhir+xml, or\n application/fhir+json. If the payload is not present, then there is no payload\n in the notification, just a notification. The mime type "text/plain" may also\n be used for Email and SMS subscriptions.\n\n header: Additional headers / information to send as part of the notification.\n\n ' from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.simple_types.url import urlSchema from spark_fhir_schemas.r4.simple_types.code import codeSchema if ((max_recursion_limit and (nesting_list.count('Subscription_Channel') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['Subscription_Channel']) schema = StructType([StructField('id', StringType(), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('type', StringType(), True), StructField('endpoint', urlSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('payload', codeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('header', ArrayType(StringType()), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema<|docstring|>The subscription resource is used to define a push-based subscription from a server to another system. Once a subscription is registered with the server, the server checks every resource that is created or updated, and if the resource matches the given criteria, it sends a message on the defined "channel" so that another system can take an appropriate action. id: Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces. extension: May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. modifierExtension: May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). type: The type of channel to send notifications on. endpoint: The url that describes the actual end-point to send messages to. payload: The mime type to send the payload in - either application/fhir+xml, or application/fhir+json. If the payload is not present, then there is no payload in the notification, just a notification. The mime type "text/plain" may also be used for Email and SMS subscriptions. header: Additional headers / information to send as part of the notification.<|endoftext|>
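A brief illustration may help connect the Subscription_Channel record above to actual Spark usage. The sketch below is a minimal example under stated assumptions: the class name Subscription_ChannelSchema is inferred from the ExtensionSchema/urlSchema naming pattern in the record rather than shown in it, and the sample channel element is invented for illustration.

# Minimal usage sketch (assumptions noted above); not part of the dataset record.
from pyspark.sql import SparkSession
from spark_fhir_schemas.r4.complex_types.subscription_channel import (
    Subscription_ChannelSchema,  # assumed class name, inferred from the naming pattern
)

spark = SparkSession.builder.appName("subscription-channel-demo").getOrCreate()

# Build the Spark schema with the default nesting and recursion limits from the signature.
channel_schema = Subscription_ChannelSchema.get_schema()

# An invented Subscription.channel element: rest-hook delivery with a JSON payload.
sample = spark.sparkContext.parallelize([
    '{"id": "ch1", "type": "rest-hook", '
    '"endpoint": "https://example.org/fhir/notify", '
    '"payload": "application/fhir+json", '
    '"header": ["Authorization: Bearer <token>"]}'
])

df = spark.read.schema(channel_schema).json(sample)
df.select("type", "endpoint", "payload", "header").show(truncate=False)

Because include_extension and include_modifierExtension default to False, the extension and modifierExtension columns of the resulting DataFrame are plain strings rather than nested structs, which keeps the schema shallow for simple notification-routing jobs.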
a677c520baa25b1dd9f5831ab743fc0c3b12b6614d84c415a743009444631293
def hardness_plot(bin_file, hr_file, file_dir, filename, color_map, stn, wcs_fits): '\n Plot hardness ratio\n ' hdu = fits.open(wcs_fits)[0] wcs = WCS(hdu.header) (Bins, x_min, x_max, y_min, y_max) = read_in(bin_file, hr_file) fig = plt.figure() fig.set_size_inches(7, 7) ax = plt.axes(xlim=(x_min, x_max), ylim=(y_min, y_max), projection=wcs) N = len(Bins) cmap = mpl.cm.get_cmap(color_map) max_hr = max([bin.hr for bin in Bins]) median_hr = np.median([bin.hr for bin in Bins]) std_hr = np.std([bin.hr for bin in Bins]) Bins_flush = [] Bins_fail = [] step_val = 1 max_hr = max([bin.hr for bin in Bins]) min_hr = min([bin.hr for bin in Bins]) hr_list = [] hr_norm_list = [] bin_nums = [] for bin in Bins: hr_norm = ((bin.hr - min_hr) / (max_hr - min_hr)) hr_list.append(bin.hr) hr_norm_list.append(hr_norm) colors = cmap(hr_norm_list) rect_step = 0 for bin in Bins: patches = [] bin_nums.append(bin.bin_number) c = colors[rect_step] for pixel in bin.pixels: x_coord = pixel.pix_x y_coord = pixel.pix_y rectangle = plt.Rectangle((x_coord, y_coord), 1, 1, color=c) patches.append(rectangle) ax.add_patch(rectangle) rect_step += 1 colors = np.linspace(min(hr_norm_list), max(hr_norm_list), N) plt.xlabel('RA') plt.ylabel('DEC') norm = mpl.colors.Normalize(min(hr_norm_list), max(hr_norm_list)) (cax, _) = cbar.make_axes(ax) cb2 = cbar.ColorbarBase(cax, cmap=cmap, norm=norm) cb2.set_label('Hardness Ratio') tick_list = np.linspace(min(hr_norm_list), max(hr_norm_list), num_ticks) ticklabel_list = np.linspace(min_hr, max_hr, num_ticks) print(max_hr) ticklabel_list = [np.round(val, 2) for val in ticklabel_list] cb2.set_ticks(tick_list) cb2.set_ticklabels(ticklabel_list) cb2.update_ticks() plt.savefig((((((file_dir + '/') + filename) + '_') + str(stn)) + '_HR.png')) return ax
Plot hardness ratio
TemperatureMapPipeline/Hardness_Ratio.py
hardness_plot
crhea93/AstronomyTools
8
python
def hardness_plot(bin_file, hr_file, file_dir, filename, color_map, stn, wcs_fits): '\n \n ' hdu = fits.open(wcs_fits)[0] wcs = WCS(hdu.header) (Bins, x_min, x_max, y_min, y_max) = read_in(bin_file, hr_file) fig = plt.figure() fig.set_size_inches(7, 7) ax = plt.axes(xlim=(x_min, x_max), ylim=(y_min, y_max), projection=wcs) N = len(Bins) cmap = mpl.cm.get_cmap(color_map) max_hr = max([bin.hr for bin in Bins]) median_hr = np.median([bin.hr for bin in Bins]) std_hr = np.std([bin.hr for bin in Bins]) Bins_flush = [] Bins_fail = [] step_val = 1 max_hr = max([bin.hr for bin in Bins]) min_hr = min([bin.hr for bin in Bins]) hr_list = [] hr_norm_list = [] bin_nums = [] for bin in Bins: hr_norm = ((bin.hr - min_hr) / (max_hr - min_hr)) hr_list.append(bin.hr) hr_norm_list.append(hr_norm) colors = cmap(hr_norm_list) rect_step = 0 for bin in Bins: patches = [] bin_nums.append(bin.bin_number) c = colors[rect_step] for pixel in bin.pixels: x_coord = pixel.pix_x y_coord = pixel.pix_y rectangle = plt.Rectangle((x_coord, y_coord), 1, 1, color=c) patches.append(rectangle) ax.add_patch(rectangle) rect_step += 1 colors = np.linspace(min(hr_norm_list), max(hr_norm_list), N) plt.xlabel('RA') plt.ylabel('DEC') norm = mpl.colors.Normalize(min(hr_norm_list), max(hr_norm_list)) (cax, _) = cbar.make_axes(ax) cb2 = cbar.ColorbarBase(cax, cmap=cmap, norm=norm) cb2.set_label('Hardness Ratio') tick_list = np.linspace(min(hr_norm_list), max(hr_norm_list), num_ticks) ticklabel_list = np.linspace(min_hr, max_hr, num_ticks) print(max_hr) ticklabel_list = [np.round(val, 2) for val in ticklabel_list] cb2.set_ticks(tick_list) cb2.set_ticklabels(ticklabel_list) cb2.update_ticks() plt.savefig((((((file_dir + '/') + filename) + '_') + str(stn)) + '_HR.png')) return ax
def hardness_plot(bin_file, hr_file, file_dir, filename, color_map, stn, wcs_fits): '\n \n ' hdu = fits.open(wcs_fits)[0] wcs = WCS(hdu.header) (Bins, x_min, x_max, y_min, y_max) = read_in(bin_file, hr_file) fig = plt.figure() fig.set_size_inches(7, 7) ax = plt.axes(xlim=(x_min, x_max), ylim=(y_min, y_max), projection=wcs) N = len(Bins) cmap = mpl.cm.get_cmap(color_map) max_hr = max([bin.hr for bin in Bins]) median_hr = np.median([bin.hr for bin in Bins]) std_hr = np.std([bin.hr for bin in Bins]) Bins_flush = [] Bins_fail = [] step_val = 1 max_hr = max([bin.hr for bin in Bins]) min_hr = min([bin.hr for bin in Bins]) hr_list = [] hr_norm_list = [] bin_nums = [] for bin in Bins: hr_norm = ((bin.hr - min_hr) / (max_hr - min_hr)) hr_list.append(bin.hr) hr_norm_list.append(hr_norm) colors = cmap(hr_norm_list) rect_step = 0 for bin in Bins: patches = [] bin_nums.append(bin.bin_number) c = colors[rect_step] for pixel in bin.pixels: x_coord = pixel.pix_x y_coord = pixel.pix_y rectangle = plt.Rectangle((x_coord, y_coord), 1, 1, color=c) patches.append(rectangle) ax.add_patch(rectangle) rect_step += 1 colors = np.linspace(min(hr_norm_list), max(hr_norm_list), N) plt.xlabel('RA') plt.ylabel('DEC') norm = mpl.colors.Normalize(min(hr_norm_list), max(hr_norm_list)) (cax, _) = cbar.make_axes(ax) cb2 = cbar.ColorbarBase(cax, cmap=cmap, norm=norm) cb2.set_label('Hardness Ratio') tick_list = np.linspace(min(hr_norm_list), max(hr_norm_list), num_ticks) ticklabel_list = np.linspace(min_hr, max_hr, num_ticks) print(max_hr) ticklabel_list = [np.round(val, 2) for val in ticklabel_list] cb2.set_ticks(tick_list) cb2.set_ticklabels(ticklabel_list) cb2.update_ticks() plt.savefig((((((file_dir + '/') + filename) + '_') + str(stn)) + '_HR.png')) return ax<|docstring|>Plot hardness ratio<|endoftext|>
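A usage note on the hardness_plot record above: the function body references read_in and num_ticks without defining them, so both must exist at module level in Hardness_Ratio.py, alongside the matplotlib, numpy, and astropy imports it relies on. The sketch below shows one plausible way a caller might drive the function; every path and value, including the num_ticks constant, is an invented placeholder rather than something the record specifies.

# Hypothetical driver for hardness_plot; all names and values below are placeholders.
import Hardness_Ratio as hr  # module containing hardness_plot, read_in, and its globals

hr.num_ticks = 5  # assumed module-level constant controlling the colorbar tick count

ax = hr.hardness_plot(
    bin_file='outputs/cluster_bins.txt',   # placeholder: per-pixel bin file read by read_in
    hr_file='outputs/cluster_hr.txt',      # placeholder: hardness ratio per bin
    file_dir='outputs',                    # directory where the PNG is written
    filename='cluster',                    # prefix of the saved figure
    color_map='viridis',                   # any matplotlib colormap name
    stn=50,                                # signal-to-noise label embedded in the file name
    wcs_fits='data/broad_flux.fits',       # placeholder FITS image supplying the WCS projection
)
# With these arguments the figure is saved as outputs/cluster_50_HR.png and the axes object is returned.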