field                    type            min      max
body_hash                stringlengths   64       64
body                     stringlengths   23       109k
docstring                stringlengths   1        57k
path                     stringlengths   4        198
name                     stringlengths   1        115
repository_name          stringlengths   7        111
repository_stars         float64         0        191k
lang                     stringclasses   1 value
body_without_docstring   stringlengths   14       108k
unified                  stringlengths   45       133k

body_hash: 3f47ac5db56a3f1396beff8f6872ce46c8ad0bec3400730e7a3bff11b5a8cf89
body:
def toFloat(s):
    'Safely convert a string to float number.'
    try:
        return float(s)
    except ValueError:
        return 0.0
docstring: Safely convert a string to float number.
path: CJK Metrics.glyphsReporter/Contents/Resources/plugin.py
name: toFloat
repository_name: schriftgestalt/CJK-Metrics
repository_stars: 2
lang: python

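A quick check of toFloat as defined above; the 0.0 fallback covers anything float() rejects with ValueError, though a None argument would still raise TypeError:

print(toFloat('3.14'))  # 3.14
print(toFloat('1e3'))   # 1000.0 (scientific notation is valid float syntax)
print(toFloat('abc'))   # 0.0 (ValueError is swallowed)
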
body_hash: 8ce2cf76bd7319946c82dfa29aa4ea829369d4906fb64fca9741e0b71b18b8c8
body:
@objc.python_method
def drawMedialAxes(self, layer):
    'Draw the medial axes (水平垂直轴线).'
    vertical = 0.5
    horizontal = 0.5
    scale = self.getScale()
    view = Glyphs.font.currentTab.graphicView()
    visibleRect = view.visibleRect()
    activePosition = view.activePosition()
    viewOriginX = (visibleRect.origin.x - activePosition.x) / scale
    viewOriginY = (visibleRect.origin.y - activePosition.y) / scale
    viewWidth = visibleRect.size.width / scale
    viewHeight = visibleRect.size.height / scale
    height = layer.ascender - layer.descender
    width = layer.width
    x = horizontal * width
    y = layer.descender + vertical * height
    color = NSColor.systemGreenColor()
    color.set()
    path = NSBezierPath.bezierPath()
    path.moveToPoint_((viewOriginX, y))
    path.lineToPoint_((viewOriginX + viewWidth, y))
    path.moveToPoint_((x, viewOriginY))
    path.lineToPoint_((x, viewOriginY + viewHeight))
    path.setLineWidth_(1 / scale)
    path.stroke()
docstring: Draw the medial axes (水平垂直轴线, i.e. the horizontal and vertical axes).
path: CJK Metrics.glyphsReporter/Contents/Resources/plugin.py
name: drawMedialAxes
repository_name: schriftgestalt/CJK-Metrics
repository_stars: 2
lang: python

body_hash: 5938cecafa6e98afd980e8d0c754e97698f690ca7456bc738eb51ab6ed16f9e5
body:
@objc.python_method
def drawCentralArea(self, layer):
    'Draw the central area (第二中心区域).'
    spacing = self.centralAreaSpacing
    descender = layer.descender
    ascender = layer.ascender
    if not self.centralAreaRotateState:
        width = self.centralAreaWidth
        height = ascender - descender
        x_mid = layer.width * self.centralAreaPosition / 100
        x0, y0 = x_mid - spacing / 2 - width / 2, descender
        x1, y1 = x_mid + spacing / 2 - width / 2, descender
    else:
        width = layer.width
        height = self.centralAreaWidth
        y_mid = descender + (ascender - descender) * self.centralAreaPosition / 100
        x0, y0 = 0, y_mid - spacing / 2 - height / 2
        x1, y1 = 0, y_mid + spacing / 2 - height / 2
    color = NSColor.systemGrayColor().colorWithAlphaComponent_(0.2)
    color.set()
    NSBezierPath.fillRect_(((x0, y0), (width, height)))
    NSBezierPath.fillRect_(((x1, y1), (width, height)))
docstring: Draw the central area (第二中心区域, i.e. the second central area).
path: CJK Metrics.glyphsReporter/Contents/Resources/plugin.py
name: drawCentralArea
repository_name: schriftgestalt/CJK-Metrics
repository_stars: 2
lang: python

body_hash: 4bd34a1effd180ed02847897175b4d6ba7ba8be3adb2e71f039a57e27ddd38ee
body:
@objc.python_method
def drawCjkGuide(self, layer):
    'Draw the CJK guide (汉字参考线).'
    self.initCjkGuideGlyph()
    color = NSColor.systemOrangeColor().colorWithAlphaComponent_(0.1)
    color.set()
    cjkGuideLayer = Glyphs.font.glyphs[CJK_GUIDE_GLYPH].layers[0]
    trans = NSAffineTransform.transform()
    if self.cjkGuideScalingState:
        xScale = layer.width / cjkGuideLayer.width
        trans.scaleXBy_yBy_(xScale, 1)
    if cjkGuideLayer.bezierPath is not None:
        path = cjkGuideLayer.bezierPath.copy()
        path.transformUsingAffineTransform_(trans)
        path.fill()
docstring: Draw the CJK guide (汉字参考线, i.e. the CJK character reference lines).
path: CJK Metrics.glyphsReporter/Contents/Resources/plugin.py
name: drawCjkGuide
repository_name: schriftgestalt/CJK-Metrics
repository_stars: 2
lang: python

body_hash: 88775dc2a318c9c26b56dc1ed81092dd4ddbe5a49468f76d583fce675b27c7d3
body:
@objc.python_method
def __file__(self):
    'Please leave this method unchanged'
    return __file__
docstring: Please leave this method unchanged
path: CJK Metrics.glyphsReporter/Contents/Resources/plugin.py
name: __file__
repository_name: schriftgestalt/CJK-Metrics
repository_stars: 2
lang: python

body_hash: e035dce185f649087e2b92fd3f27f63d35d0d7ed6602f6e2e22bd41a747ef2f0
body:
def get_classes(path):
    '获取分类列表'
    classes = []
    with open(os.path.join(path, 'classes.csv'), 'r', encoding='utf-8') as f:
        for line in f.readlines():
            classes.append(line.strip())
    return classes
docstring: Get the class list (获取分类列表).
path: preprocess.py
name: get_classes
repository_name: MeanZhang/TextClassification
repository_stars: 3
lang: python

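A self-contained way to exercise get_classes; the temporary directory and class names are made up for the demo:

import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    with open(os.path.join(d, 'classes.csv'), 'w', encoding='utf-8') as f:
        f.write('sports\nfinance\ntech\n')
    print(get_classes(d))  # ['sports', 'finance', 'tech'] (assumes get_classes above)
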
body_hash: ba47a1069018c5ccb789a2f66dd1562cddcaa36bfa5eaa9a7fbc88f7665059c6
body:
def get_field(path):
    '创建Field'
    with open(os.path.join(path, 'stop_words.txt'), 'r', encoding='utf-8') as f:
        stop_words = [word.strip('\n') for word in f.readlines()]
    TEXT = data.Field(tokenize=jieba.lcut, lower=True, stop_words=stop_words)
    LABEL = data.Field(sequential=False, use_vocab=False)
    return TEXT, LABEL
docstring: Create the torchtext Field objects (创建Field).
path: preprocess.py
name: get_field
repository_name: MeanZhang/TextClassification
repository_stars: 3
lang: python

body_hash: 48bca8407e3d51121f7d45dbdacc9e18ea3f7d843587ed679e80419b5e62c164
body:
def preprocess(path, text, label, args):
    '预处理'
    train, val, test = data.TabularDataset.splits(
        path=path, train='train.tsv', validation='val.tsv', test='test.tsv',
        format='tsv', fields=[('label', label), ('text', text)])
    text.build_vocab(train, val)
    with open(os.path.join(path, 'vocab.pkl'), 'wb') as f:
        pickle.dump(text.vocab, f)
    train_iter, val_iter, test_iter = data.BucketIterator.splits(
        (train, val, test),
        batch_sizes=(args.batch_size, args.batch_size, args.batch_size),
        device=args.device, sort_key=lambda x: len(x.text))
    return train_iter, val_iter, test_iter
docstring: Preprocess the data (预处理).
path: preprocess.py
name: preprocess
repository_name: MeanZhang/TextClassification
repository_stars: 3
lang: python

body_hash: daf2cd6bc7169da82a66afc7497f904f33207e1666c07e238b75b3dc2a6eba4f
body:
@defNode('Remove Dictionary Key', returnNames=['key'], isExecutable=True, identifier=COLLECTION_IDENTIFIER)
def removeDictKey(dictionary, key):
    """
    Returns the removed key. If the key does not exist None is returned.
    """
    return dictionary.pop(key, None)
docstring: Returns the removed key. If the key does not exist None is returned.
path: node_exec/collection_nodes.py
name: removeDictKey
repository_name: compix/NodeGraphQt
repository_stars: 0
lang: python

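Stripped of the @defNode registration (a decorator from the surrounding node_exec framework), the body is plain dict.pop with a default; a minimal demo of that core behavior:

def remove_dict_key(dictionary, key):
    # Same core behavior as removeDictKey above, minus the node decorator.
    return dictionary.pop(key, None)

d = {'a': 1, 'b': 2}
print(remove_dict_key(d, 'a'))  # 1 (the removed value, despite what the docstring says)
print(remove_dict_key(d, 'z'))  # None (a missing key does not raise)
print(d)                        # {'b': 2}
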
body_hash: 05df4bafa159ce5c4ffc60d3c9688bc4cd9d60734f568bbeda225426d5f139ee
body:
def _validate_not_subset(of, allow_none=False):
    """
    Create validator to check if an attribute is not a subset of ``of``.

    Parameters
    ----------
    of: str
        Attribute name that the subject under validation should not be a subset of.

    Returns
    -------
    validator: Callable
        Validator that can be used for ``attr.ib``.
    """
    def _v(instance, attribute, value):
        if allow_none and value is None:
            return
        other_set = set(getattr(instance, of))
        if isinstance(value, str):
            my_set = {value}
        else:
            my_set = set(value)
        share = my_set & other_set
        if share:
            raise ValueError(
                '{attribute} cannot share columns with {of}, but share the following: {share}'.format(
                    attribute=attribute.name, of=of, share=', '.join(sorted(share))))
    return _v
docstring: Create validator to check if an attribute is not a subset of ``of``. Parameters ---------- of: str Attribute name that the subject under validation should not be a subset of. Returns ------- validator: Callable Validator that can be used for ``attr.ib``.
path: kartothek/core/cube/cube.py
name: _validate_not_subset
repository_name: martin-haffner-by/kartothek
repository_stars: 171
lang: python

body_hash: 9d24401015158962dfefb0a5b3698f396b544ce0e364230ab4237340a801e93e
body:
def _validate_subset(of, allow_none=False):
    """
    Create validator to check that an attribute is a subset of ``of``.

    Parameters
    ----------
    of: str
        Attribute name that the subject under validation should be a subset of.

    Returns
    -------
    validator: Callable
        Validator that can be used for ``attr.ib``.
    """
    def _v(instance, attribute, value):
        if allow_none and value is None:
            return
        other_set = set(getattr(instance, of))
        if isinstance(value, str):
            my_set = {value}
        else:
            my_set = set(value)
        too_much = my_set - other_set
        if too_much:
            raise ValueError(
                '{attribute} must be a subset of {of}, but it has additional values: {too_much}'.format(
                    attribute=attribute.name, of=of, too_much=', '.join(sorted(too_much))))
    return _v
docstring: Create validator to check that an attribute is a subset of ``of``. Parameters ---------- of: str Attribute name that the subject under validation should be a subset of. Returns ------- validator: Callable Validator that can be used for ``attr.ib``.
path: kartothek/core/cube/cube.py
name: _validate_subset
repository_name: martin-haffner-by/kartothek
repository_stars: 171
lang: python

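Both helpers return closures meant for attr.ib(validator=...); a minimal sketch, assuming the attrs package and _validate_not_subset from the record above are in scope (the class and field names here are invented):

import attr

@attr.s
class Spec:
    dimension_columns = attr.ib()
    partition_columns = attr.ib(validator=_validate_not_subset('dimension_columns'))

Spec(dimension_columns=['x', 'y'], partition_columns=['p'])  # passes
try:
    Spec(dimension_columns=['x', 'y'], partition_columns=['x'])
except ValueError as e:
    print(e)  # partition_columns cannot share columns with dimension_columns, ...

_validate_subset wires up the same way and enforces the opposite containment.
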
body_hash: 650a9923ea6e8b02321f7faece3a3c6e0db8633a8633d9052305e1c1090a30ba
body:
def _validator_uuid(instance, attribute, value):
    """
    Attr validator to validate if UUIDs are valid.
    """
    _validator_uuid_freestanding(attribute.name, value)
docstring: Attr validator to validate if UUIDs are valid.
path: kartothek/core/cube/cube.py
name: _validator_uuid
repository_name: martin-haffner-by/kartothek
repository_stars: 171
lang: python

body_hash: acfbcd642e077a4c48cef0212f36331a25ac0283077266fbf082296e2ff23e3a
body:
def _validator_uuid_freestanding(name, value):
    """
    Freestanding version of :meth:`_validate_not_subset`.
    """
    if not _validate_uuid(value):
        raise ValueError('{name} ("{value}") is not compatible with kartothek'.format(name=name, value=value))
    if value.find(KTK_CUBE_UUID_SEPERATOR) != -1:
        raise ValueError('{name} ("{value}") must not contain UUID separator {sep}'.format(
            name=name, value=value, sep=KTK_CUBE_UUID_SEPERATOR))
docstring: Freestanding version of :meth:`_validate_not_subset`.
path: kartothek/core/cube/cube.py
name: _validator_uuid_freestanding
repository_name: martin-haffner-by/kartothek
repository_stars: 171
lang: python

body_hash: 1a28891015f8284440b418f733b9cd1db2333f1ae94845a5e1066ab80d03cf87
body:
def _validator_not_empty(instance, attribute, value):
    """
    Attr validator to validate that a list is not empty.
    """
    if len(value) == 0:
        raise ValueError('{name} must not be empty'.format(name=attribute.name))
docstring: Attr validator to validate that a list is not empty.
path: kartothek/core/cube/cube.py
name: _validator_not_empty
repository_name: martin-haffner-by/kartothek
repository_stars: 171
lang: python

body_hash: 12f44525249924e280f3f82c8c69581ea541b47cd5c257f01393deb7285aa01b
body:
def ktk_dataset_uuid(self, ktk_cube_dataset_id):
    """
    Get Kartothek dataset UUID for given dataset UUID, so the prefix is included.

    Parameters
    ----------
    ktk_cube_dataset_id: str
        Dataset ID w/o prefix

    Returns
    -------
    ktk_dataset_uuid: str
        Prefixed dataset UUID for Kartothek.

    Raises
    ------
    ValueError
        If ``ktk_cube_dataset_id`` is not a string or if it is not a valid UUID.
    """
    ktk_cube_dataset_id = converter_str(ktk_cube_dataset_id)
    _validator_uuid_freestanding('ktk_cube_dataset_id', ktk_cube_dataset_id)
    return '{uuid_prefix}{sep}{ktk_cube_dataset_id}'.format(
        uuid_prefix=self.uuid_prefix,
        sep=KTK_CUBE_UUID_SEPERATOR,
        ktk_cube_dataset_id=ktk_cube_dataset_id)
docstring: Get Kartothek dataset UUID for given dataset UUID, so the prefix is included. Parameters ---------- ktk_cube_dataset_id: str Dataset ID w/o prefix Returns ------- ktk_dataset_uuid: str Prefixed dataset UUID for Kartothek. Raises ------ ValueError If ``ktk_cube_dataset_id`` is not a string or if it is not a valid UUID.
path: kartothek/core/cube/cube.py
name: ktk_dataset_uuid
repository_name: martin-haffner-by/kartothek
repository_stars: 171
lang: python

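The return value is just prefix + separator + dataset ID. A standalone sketch; the '++' separator is an assumption about KTK_CUBE_UUID_SEPERATOR (the repo's own spelling), not something this dump confirms:

KTK_CUBE_UUID_SEPERATOR = '++'  # assumed value, illustration only

def dataset_uuid(uuid_prefix, ktk_cube_dataset_id):
    # Hypothetical free function mirroring the method's final format() call.
    return '{}{}{}'.format(uuid_prefix, KTK_CUBE_UUID_SEPERATOR, ktk_cube_dataset_id)

print(dataset_uuid('my_cube', 'seed'))  # my_cube++seed
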
body_hash: 3ef57cd97f7e61d0f7d3dc9aa134c1ae699bcc21bcac2a7572f89b9e99776ab7
body:
@property
def ktk_index_columns(self):
    """
    Set of all available index columns through Kartothek, primary and secondary.
    """
    return (set(self.partition_columns) | set(self.index_columns)
            | (set(self.dimension_columns) - set(self.suppress_index_on)))
docstring: Set of all available index columns through Kartothek, primary and secondary.
path: kartothek/core/cube/cube.py
name: ktk_index_columns
repository_name: martin-haffner-by/kartothek
repository_stars: 171
lang: python

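The property is pure set algebra over four column lists; with concrete (invented) values:

partition_columns = ['p']
index_columns = ['i']
dimension_columns = ['x', 'y']
suppress_index_on = ['y']

# Partition (primary) indices, explicit secondary indices, and dimension
# indices minus any that are suppressed:
ktk_index_columns = (set(partition_columns) | set(index_columns)
                     | (set(dimension_columns) - set(suppress_index_on)))
print(sorted(ktk_index_columns))  # ['i', 'p', 'x']
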
body_hash: daf3d730b81731dea6e2699319d1ca6ee5d96f085909398ad4a883a3629ba495
body:
def copy(self, **kwargs):
    """
    Create a new cube specification w/ changed attributes.

    This will not trigger any IO operation, but only affects the cube specification.

    Parameters
    ----------
    kwargs: Dict[str, Any]
        Attributes that should be changed.

    Returns
    -------
    cube: Cube
        New abstract cube.
    """
    return attr.evolve(self, **kwargs)
docstring: Create a new cube specification w/ changed attributes. This will not trigger any IO operation, but only affects the cube specification. Parameters ---------- kwargs: Dict[str, Any] Attributes that should be changed. Returns ------- cube: Cube New abstract cube.
path: kartothek/core/cube/cube.py
name: copy
repository_name: martin-haffner-by/kartothek
repository_stars: 171
lang: python

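attr.evolve builds a new instance with selected attributes replaced, which is why copy() touches no storage; a generic demo on an invented attrs class:

import attr

@attr.s(frozen=True)
class CubeSpec:
    uuid_prefix = attr.ib()
    dimension_columns = attr.ib(factory=list)

spec = CubeSpec(uuid_prefix='my_cube')
spec2 = attr.evolve(spec, dimension_columns=['x', 'y'])
print(spec)   # CubeSpec(uuid_prefix='my_cube', dimension_columns=[])
print(spec2)  # CubeSpec(uuid_prefix='my_cube', dimension_columns=['x', 'y'])
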
body_hash: 92e1a3d5fa9a8cc46787e8898b983047fe43960c324715ccc2e7ceb3eaccbaa4
body:
@staticmethod
def initialize():
    """ Installs GITNB
    - installs git pre-commit hook
    - creates .gitnb dir
    """
    if os.path.isfile(GIT_PC):
        nb_match = utils.nb_matching_lines('gitnb', GIT_PC)
    else:
        nb_match = 0
    if nb_match > 0:
        print('\ngitnb[WARNING]:')
        print('\tit appears you have already initialized gitnb for')
        print('\tthis project. verify: cat .git/hooks/pre-commit\n')
    elif os.path.exists(GIT_DIR):
        cmd1 = 'cp -R {} {}'.format(DOT_GITNB_CONFIG_DIR, GITNB_CONFIG_DIR)
        os.system(cmd1)
        utils.copy_append(PRECOMMIT_SCRIPT, GIT_PC)
        cmd2 = 'chmod +x {}'.format(GIT_PC)
        os.system(cmd2)
        print('\ngitnb: INSTALLED ')
        print('\t - nbpy.py files will be created/updated/tracked')
        print('\t - install user config with: $ gitnb configure\n')
    else:
        print('gitnb: MUST INITIALIZE GIT')
docstring: Installs GITNB - installs git pre-commit hook - creates .gitnb dir
path: gitnb/project.py
name: initialize
repository_name: brookisme/nb_git
repository_stars: 14
lang: python

body_hash: 43573e984cb358638e8e500831ee8d0c052ba47a18f187d7b4c0aeca8e34b936
body:
@staticmethod
def configure():
    """ Install config file
    allows user to change config
    """
    utils.copy_append(DEFAULT_CONFIG, USER_CONFIG, 'w')
    print('gitnb: USER CONFIG FILE ADDED ({}) '.format(USER_CONFIG))
docstring: Install config file allows user to change config
path: gitnb/project.py
name: configure
repository_name: brookisme/nb_git
repository_stars: 14
lang: python

body_hash: 2b28532c6709525b5a93deb63a09db94b4807ef704f9582d26dc8cbe3b4699e2
body:
@pytest.mark.parametrize('test_input,expected', [
    ('dataset/table/_SUCCESS',
     {'dataset': 'dataset', 'table': 'table', 'partition': None,
      'yyyy': None, 'mm': None, 'dd': None, 'hh': None, 'batch': None}),
    ('dataset/table/$20201030/_SUCCESS',
     {'dataset': 'dataset', 'table': 'table', 'partition': '$20201030',
      'yyyy': None, 'mm': None, 'dd': None, 'hh': None, 'batch': None}),
    ('dataset/table/$20201030/batch_id/_SUCCESS',
     {'dataset': 'dataset', 'table': 'table', 'partition': '$20201030',
      'yyyy': None, 'mm': None, 'dd': None, 'hh': None, 'batch': 'batch_id'}),
    ('dataset/table/batch_id/_SUCCESS',
     {'dataset': 'dataset', 'table': 'table', 'partition': None,
      'yyyy': None, 'mm': None, 'dd': None, 'hh': None, 'batch': 'batch_id'}),
    ('dataset/table/2020/01/02/03/batch_id/_SUCCESS',
     {'dataset': 'dataset', 'table': 'table', 'partition': None,
      'yyyy': '2020', 'mm': '01', 'dd': '02', 'hh': '03', 'batch': 'batch_id'}),
    ('project.dataset/table/2020/01/02/03/batch_id/_SUCCESS',
     {'dataset': 'project.dataset', 'table': 'table', 'partition': None,
      'yyyy': '2020', 'mm': '01', 'dd': '02', 'hh': '03', 'batch': 'batch_id'}),
])
def test_default_destination_regex(test_input: str, expected: Dict[str, Optional[str]]):
    """ensure our default regex handles each scenario we document.
    this test is to support improving this regex in the future w/o regressing
    for existing use cases.
    """
    match = COMPILED_DEFAULT_DENTINATION_REGEX.match(test_input)
    if match:
        assert match.groupdict() == expected
    else:
        raise AssertionError(
            f'{COMPILED_DEFAULT_DENTINATION_REGEX} did not match test case {test_input}.')
docstring: ensure our default regex handles each scenario we document. this test is to support improving this regex in the future w/o regressing for existing use cases.
path: tools/cloud_functions/gcs_event_based_ingest/tests/gcs_ocn_bq_ingest/test_gcs_ocn_bq_ingest.py
name: test_default_destination_regex
repository_name: saher4bc/bigquery-utils
repository_stars: 1
lang: python

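The six cases pin down the path grammar. A regex satisfying all of them looks roughly like the following; it is a reconstruction for illustration, not the module's actual COMPILED_DEFAULT_DENTINATION_REGEX:

import re

destination_regex = re.compile(
    r'^(?P<dataset>[\w.]+)/(?P<table>\w+)/'
    r'(?:(?P<partition>\$\d+)/)?'
    r'(?:(?P<yyyy>\d{4})/(?P<mm>\d{2})/(?P<dd>\d{2})/(?P<hh>\d{2})/)?'
    r'(?:(?P<batch>\w+)/)?_SUCCESS$')

m = destination_regex.match('project.dataset/table/2020/01/02/03/batch_id/_SUCCESS')
print(m.groupdict())
# {'dataset': 'project.dataset', 'table': 'table', 'partition': None,
#  'yyyy': '2020', 'mm': '01', 'dd': '02', 'hh': '03', 'batch': 'batch_id'}
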
body_hash: f5a2fe0e21e3efc89bbeabe248be9243c3dd84adff4fbf474f1c752e2e6af77d
body:
def distance_picking(Loc1, Loc2, y_low, y_high):
    'Calculate Picker Route Distance between two locations'
    x1, y1 = Loc1[0], Loc1[1]
    x2, y2 = Loc2[0], Loc2[1]
    distance_x = abs(x2 - x1)
    if x1 == x2:
        distance_y1 = abs(y2 - y1)
        distance_y2 = distance_y1
    else:
        distance_y1 = (y_high - y1) + (y_high - y2)
        distance_y2 = (y1 - y_low) + (y2 - y_low)
    distance_y = min(distance_y1, distance_y2)
    distance = distance_x + distance_y
    return int(distance)
docstring: Calculate Picker Route Distance between two locations
path: utils/routing/distances.py
name: distance_picking
repository_name: corentin-glanum/pickingRoute
repository_stars: 15
lang: python

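The metric is rectilinear with an aisle constraint: in the same aisle (x1 == x2) the picker moves straight, otherwise the route must pass through the top (y_high) or bottom (y_low) cross-aisle, whichever is shorter. Worked calls, assuming distance_picking above:

print(distance_picking((0, 2), (0, 5), y_low=0, y_high=10))  # 3, same aisle
# Different aisles: via bottom costs 2 + 5 = 7, via top 8 + 5 = 13;
# min(7, 13) plus the horizontal distance 2 gives 9.
print(distance_picking((0, 2), (2, 5), y_low=0, y_high=10))  # 9
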
body_hash: 5753d5c4c91f41a4df68aa5c22dce9511aeb76a3f75e434d512d85daed4c031a
body:
def next_location(start_loc, list_locs, y_low, y_high):
    'Find closest next location'
    list_dist = [distance_picking(start_loc, i, y_low, y_high) for i in list_locs]
    distance_next = min(list_dist)
    index_min = list_dist.index(min(list_dist))
    next_loc = list_locs[index_min]
    list_locs.remove(next_loc)
    return list_locs, start_loc, next_loc, distance_next
docstring: Find closest next location
path: utils/routing/distances.py
name: next_location
repository_name: corentin-glanum/pickingRoute
repository_stars: 15
lang: python

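A greedy nearest-neighbor step; note that list_locs is mutated in place (the chosen location is removed). Usage, assuming distance_picking and next_location above:

locs = [(5, 2), (0, 8), (2, 2)]
remaining, start, nxt, d = next_location((0, 0), locs, y_low=0, y_high=10)
print(nxt, d)      # (2, 2) 4, the cheapest reachable location
print(remaining)   # [(5, 2), (0, 8)], the same list object, now shorter
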
body_hash: dead9e94bea7d4f7b442bd4c66d2ca42c92d1a71f171a0aeea6f6ebcf5f3c072
body:
def centroid(list_in):
    'Centroid function'
    x, y = [p[0] for p in list_in], [p[1] for p in list_in]
    centroid = [round(sum(x) / len(list_in), 2), round(sum(y) / len(list_in), 2)]
    return centroid
docstring: Centroid function
path: utils/routing/distances.py
name: centroid
repository_name: corentin-glanum/pickingRoute
repository_stars: 15
lang: python

body_hash: 6849f85937aac2a60201e81f8d7c720b62f4d66d87d7887d0d7d15e8f81c82d6
body:
def centroid_mapping(df_multi):
    'Mapping Centroids'
    df_multi['Coord'] = df_multi['Coord'].apply(literal_eval)
    df_group = pd.DataFrame(df_multi.groupby(['OrderNumber'])['Coord'].apply(list)).reset_index()
    df_group['Coord_Centroid'] = df_group['Coord'].apply(centroid)
    list_order, list_coord = list(df_group.OrderNumber.values), list(df_group.Coord_Centroid.values)
    dict_coord = dict(zip(list_order, list_coord))
    df_multi['Coord_Cluster'] = df_multi['OrderNumber'].map(dict_coord).astype(str)
    df_multi['Coord'] = df_multi['Coord'].astype(str)
    return df_multi
docstring: Mapping Centroids
path: utils/routing/distances.py
name: centroid_mapping
repository_name: corentin-glanum/pickingRoute
repository_stars: 15
lang: python

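A small frame shows the round trip: Coord strings are parsed, per-order centroids computed, and mapped back as Coord_Cluster. Runnable assuming pandas plus the centroid and centroid_mapping definitions above (with pd and literal_eval imported as in the source module):

import pandas as pd
from ast import literal_eval

df = pd.DataFrame({
    'OrderNumber': ['A', 'A', 'B'],
    'Coord': ['[0, 0]', '[2, 4]', '[5, 5]'],
})
out = centroid_mapping(df)
print(out['Coord_Cluster'].tolist())  # ['[1.0, 2.0]', '[1.0, 2.0]', '[5.0, 5.0]']
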
body_hash: 0a6dac903117f8049a7def9f31402c937ac8785032caa333d2a8a2b271e00e15
body:
def initialize_with_zeros(dim):
    """
    This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.

    Argument:
    dim -- size of the w vector we want (or number of parameters in this case)

    Returns:
    w -- initialized vector of shape (dim, 1)
    b -- initialized scalar (corresponds to the bias)
    """
    w = None
    b = None
    w = np.zeros((dim, 1))
    b = 0
    assert w.shape == (dim, 1)
    assert isinstance(b, float) or isinstance(b, int)
    return w, b
docstring: This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0. Argument: dim -- size of the w vector we want (or number of parameters in this case) Returns: w -- initialized vector of shape (dim, 1) b -- initialized scalar (corresponds to the bias)
path: common.py
name: initialize_with_zeros
repository_name: navyverma/deep_implementation
repository_stars: 0
lang: python

body_hash: 2a5d028cf87b3daa73a9a709aec89d0910c570fb26ccafbedd5b9d62f7000e6a
body:
def sigmoid(z):
    """
    Compute the sigmoid of z

    Arguments:
    z -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(z)
    """
    s = 1 / (1 + np.exp(-z))
    return s
docstring: Compute the sigmoid of z Arguments: z -- A scalar or numpy array of any size. Return: s -- sigmoid(z)
path: common.py
name: sigmoid
repository_name: navyverma/deep_implementation
repository_stars: 0
lang: python

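A quick numeric check; sigmoid(0) is exactly 0.5 and the expression broadcasts over numpy arrays (assumes sigmoid above, with np imported):

import numpy as np

print(sigmoid(0))                        # 0.5
print(sigmoid(np.array([-2., 0., 2.])))  # approx. [0.1192 0.5 0.8808]
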
body_hash: ebc212131c9c94f6748895c2b6e9b30f7c810f13c9d0f5b1c4e6304bd7e48f92
body:
def sigmoid_cache(Z):
    """
    Implements the sigmoid activation in numpy

    Arguments:
    Z -- numpy array of any shape

    Returns:
    A -- output of sigmoid(z), same shape as Z
    cache -- returns Z as well, useful during backpropagation
    """
    A = 1 / (1 + np.exp(-Z))
    cache = Z
    return A, cache
docstring: Implements the sigmoid activation in numpy Arguments: Z -- numpy array of any shape Returns: A -- output of sigmoid(z), same shape as Z cache -- returns Z as well, useful during backpropagation
path: common.py
name: sigmoid_cache
repository_name: navyverma/deep_implementation
repository_stars: 0
lang: python

body_hash: 428f82996ab844bf7ed81b5d5ebc9a42086a8add4a81f294697b3012be174d9e
body:
def relu(Z):
    """
    Implement the RELU function.
    Arguments:
    Z -- Output of the linear layer, of any shape
    Returns:
    A -- Post-activation parameter, of the same shape as Z
    cache -- a python dictionary containing "A" ; stored for computing the backward pass efficiently
    """
    A = np.maximum(0, Z)
    assert A.shape == Z.shape
    cache = Z
    return A, cache
docstring: Implement the RELU function. Arguments: Z -- Output of the linear layer, of any shape Returns: A -- Post-activation parameter, of the same shape as Z cache -- a python dictionary containing "A" ; stored for computing the backward pass efficiently
path: common.py
name: relu
repository_name: navyverma/deep_implementation
repository_stars: 0
lang: python

body_hash: 6ebb40c758c22c6c0489e5f02bda5367d1aa97608ee379a31a7ef3181be58b4c
body:
def relu_cache(Z):
    """
    Implement the RELU function.
    Arguments:
    Z -- Output of the linear layer, of any shape
    Returns:
    A -- Post-activation parameter, of the same shape as Z
    cache -- a python dictionary containing "A" ; stored for computing the backward pass efficiently
    """
    A = np.maximum(0, Z)
    assert A.shape == Z.shape
    cache = Z
    return A, cache
docstring: Implement the RELU function. Arguments: Z -- Output of the linear layer, of any shape Returns: A -- Post-activation parameter, of the same shape as Z cache -- a python dictionary containing "A" ; stored for computing the backward pass efficiently
path: common.py
name: relu_cache
repository_name: navyverma/deep_implementation
repository_stars: 0
lang: python

body_hash: 749acf9a62a1d565e41c1050253a6f89f1b74ed4dbbb4ec65e9eb17ecf1008a0
body:
def relu_backward(dA, cache):
    """
    Implement the backward propagation for a single RELU unit.
    Arguments:
    dA -- post-activation gradient, of any shape
    cache -- 'Z' where we store for computing backward propagation efficiently
    Returns:
    dZ -- Gradient of the cost with respect to Z
    """
    Z = cache
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0
    assert dZ.shape == Z.shape
    return dZ
docstring: Implement the backward propagation for a single RELU unit. Arguments: dA -- post-activation gradient, of any shape cache -- 'Z' where we store for computing backward propagation efficiently Returns: dZ -- Gradient of the cost with respect to Z
path: common.py
name: relu_backward
repository_name: navyverma/deep_implementation
repository_stars: 0
lang: python

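The backward pass zeroes the gradient wherever the forward input was non-positive; a check, assuming relu and relu_backward from the records above:

import numpy as np

Z = np.array([[-1.0, 2.0], [0.0, 3.0]])
A, cache = relu(Z)
dZ = relu_backward(np.ones_like(Z), cache)
print(dZ)  # [[0. 1.] [0. 1.]]: gradient passes only where Z > 0
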
body_hash: 8c6ddcf2d4fe7a2084e94c024dc2c04a53afb8b963fb7160c5344b3d41e14619
body:
def sigmoid_backward(dA, cache):
    """
    Implement the backward propagation for a single SIGMOID unit.
    Arguments:
    dA -- post-activation gradient, of any shape
    cache -- 'Z' where we store for computing backward propagation efficiently
    Returns:
    dZ -- Gradient of the cost with respect to Z
    """
    Z = cache
    s = 1 / (1 + np.exp(-Z))
    dZ = dA * s * (1 - s)
    assert dZ.shape == Z.shape
    return dZ
docstring: Implement the backward propagation for a single SIGMOID unit. Arguments: dA -- post-activation gradient, of any shape cache -- 'Z' where we store for computing backward propagation efficiently Returns: dZ -- Gradient of the cost with respect to Z
path: common.py
name: sigmoid_backward
repository_name: navyverma/deep_implementation
repository_stars: 0
lang: python

body_hash: ce697f6e26ee6645758298f641643fcd71a10999bf63d7f70c6d8be2fcf78da0
body:
def print_mislabeled_images(classes, X, y, p):
    """
    Plots images where predictions and truth were different.
    X -- dataset
    y -- true labels
    p -- predictions
    """
    a = p + y
    mislabeled_indices = np.asarray(np.where(a == 1))
    plt.rcParams['figure.figsize'] = (40.0, 40.0)
    num_images = len(mislabeled_indices[0])
    for i in range(num_images):
        index = mislabeled_indices[1][i]
        plt.subplot(2, num_images, i + 1)
        plt.imshow(X[:, index].reshape(64, 64, 3), interpolation='nearest')
        plt.axis('off')
        plt.title('Prediction: ' + classes[int(p[0, index])].decode('utf-8')
                  + ' \n Class: ' + classes[y[0, index]].decode('utf-8'))
docstring: Plots images where predictions and truth were different. X -- dataset y -- true labels p -- predictions
path: common.py
name: print_mislabeled_images
repository_name: navyverma/deep_implementation
repository_stars: 0
lang: python

body_hash: c9781ba427f9fafb77ff0ab8a3e07c990d281499c67e9a1eeb32ec041e3235ef
body:
@property
def lens(self):
    """
    A :py:class:`~MulensModel.mulensobjects.lens.Lens` object.
    Physical properties of the lens. Note: lens mass must be in
    solMasses.
    """
    return self._lens
docstring: A :py:class:`~MulensModel.mulensobjects.lens.Lens` object. Physical properties of the lens. Note: lens mass must be in solMasses.
path: source/MulensModel/mulensobjects/mulenssystem.py
name: lens
repository_name: KKruszynska/MulensModel
repository_stars: 30
lang: python

body_hash: 12daf8cfcd84d8df77e9c12eb1eefbd180e2594ede35ad2efead304b7338e419
body:
@property
def source(self):
    """
    :py:class:`~MulensModel.mulensobjects.source.Source` object.
    Physical properties of the source.
    """
    return self._source
docstring: :py:class:`~MulensModel.mulensobjects.source.Source` object. Physical properties of the source.
path: source/MulensModel/mulensobjects/mulenssystem.py
name: source
repository_name: KKruszynska/MulensModel
repository_stars: 30
lang: python

@property def source(self): '\n :py:class:`~MulensModel.mulensobjects.source.Source` object.\n Physical properties of the source.\n ' return self._source
@property def source(self): '\n :py:class:`~MulensModel.mulensobjects.source.Source` object.\n Physical properties of the source.\n ' return self._source<|docstring|>:py:class:`~MulensModel.mulensobjects.source.Source` object. Physical properties of the source.<|endoftext|>
24c5000528d729233f2cb5b03bc02e5edbb15ccac951bd6523bc428c358687e4
@property def mu_rel(self): '\n *astropy.Quantity*\n\n Relative proper motion between the source and lens\n stars. If set as a *float*, units are assumed to be mas/yr.\n ' return self._mu_rel
*astropy.Quantity* Relative proper motion between the source and lens stars. If set as a *float*, units are assumed to be mas/yr.
source/MulensModel/mulensobjects/mulenssystem.py
mu_rel
KKruszynska/MulensModel
30
python
@property def mu_rel(self): '\n *astropy.Quantity*\n\n Relative proper motion between the source and lens\n stars. If set as a *float*, units are assumed to be mas/yr.\n ' return self._mu_rel
@property def mu_rel(self): '\n *astropy.Quantity*\n\n Relative proper motion between the source and lens\n stars. If set as a *float*, units are assumed to be mas/yr.\n ' return self._mu_rel<|docstring|>*astropy.Quantity* Relative proper motion between the source and lens stars. If set as a *float*, units are assumed to be mas/yr.<|endoftext|>
9b2bf40931a1f46810e6eead7718cafe1a08913d2e53f823b51f98471bbd6729
@property def t_E(self): '\n *astropy.Quantity*\n\n The Einstein crossing time (in days). If set as a *float*,\n assumes units are in days.\n ' try: t_E = (self.theta_E / self.mu_rel) return t_E.to(u.day) except Exception: return None
*astropy.Quantity* The Einstein crossing time (in days). If set as a *float*, assumes units are in days.
source/MulensModel/mulensobjects/mulenssystem.py
t_E
KKruszynska/MulensModel
30
python
@property def t_E(self): '\n *astropy.Quantity*\n\n The Einstein crossing time (in days). If set as a *float*,\n assumes units are in days.\n ' try: t_E = (self.theta_E / self.mu_rel) return t_E.to(u.day) except Exception: return None
@property def t_E(self): '\n *astropy.Quantity*\n\n The Einstein crossing time (in days). If set as a *float*,\n assumes units are in days.\n ' try: t_E = (self.theta_E / self.mu_rel) return t_E.to(u.day) except Exception: return None<|docstring|>*astropy.Quantity* The Einstein crossing time (in days). If set as a *float*, assumes units are in days.<|endoftext|>
9137471b2b683193b617868f6ab0fbb896e972ae42450b7524dc817fa7e1ffec
@property def pi_rel(self): '\n *astropy.Quantity*, read-only\n\n The source-lens relative parallax in milliarcseconds.\n ' return (self.lens.pi_L.to(u.mas) - self.source.pi_S.to(u.mas))
*astropy.Quantity*, read-only The source-lens relative parallax in milliarcseconds.
source/MulensModel/mulensobjects/mulenssystem.py
pi_rel
KKruszynska/MulensModel
30
python
@property def pi_rel(self): '\n *astropy.Quantity*, read-only\n\n The source-lens relative parallax in milliarcseconds.\n ' return (self.lens.pi_L.to(u.mas) - self.source.pi_S.to(u.mas))
@property def pi_rel(self): '\n *astropy.Quantity*, read-only\n\n The source-lens relative parallax in milliarcseconds.\n ' return (self.lens.pi_L.to(u.mas) - self.source.pi_S.to(u.mas))<|docstring|>*astropy.Quantity*, read-only The source-lens relative parallax in milliarcseconds.<|endoftext|>
a927bcad898ce0ae5ff1b242b2c4998b0b7f15ea0c9a1791ad4d7fa29fbf5cb0
@property def pi_E(self): "\n *float*, read-only\n\n The microlensing parallax. It's equal to pi_rel / theta_E.\n Dimensionless.\n " return (self.pi_rel / self.theta_E).decompose().value
*float*, read-only The microlensing parallax. It's equal to pi_rel / theta_E. Dimensionless.
source/MulensModel/mulensobjects/mulenssystem.py
pi_E
KKruszynska/MulensModel
30
python
@property def pi_E(self): "\n *float*, read-only\n\n The microlensing parallax. It's equal to pi_rel / theta_E.\n Dimensionless.\n " return (self.pi_rel / self.theta_E).decompose().value
@property def pi_E(self): "\n *float*, read-only\n\n The microlensing parallax. It's equal to pi_rel / theta_E.\n Dimensionless.\n " return (self.pi_rel / self.theta_E).decompose().value<|docstring|>*float*, read-only The microlensing parallax. It's equal to pi_rel / theta_E. Dimensionless.<|endoftext|>
49f57c1bcba8098965eaacfd74f43f2cf367330438fe6e512142ab51b0371ceb
@property def theta_E(self): '\n *astropy.Quantity*, read-only\n\n The angular Einstein Radius in milliarcseconds.\n ' kappa = ((4.0 * G) / ((c ** 2) * au)).to((u.mas / u.Msun), equivalencies=u.dimensionless_angles()) return np.sqrt(((kappa * self.lens.total_mass.to(u.solMass)) * self.pi_rel.to(u.mas)))
*astropy.Quantity*, read-only The angular Einstein Radius in milliarcseconds.
source/MulensModel/mulensobjects/mulenssystem.py
theta_E
KKruszynska/MulensModel
30
python
@property def theta_E(self): '\n *astropy.Quantity*, read-only\n\n The angular Einstein Radius in milliarcseconds.\n ' kappa = ((4.0 * G) / ((c ** 2) * au)).to((u.mas / u.Msun), equivalencies=u.dimensionless_angles()) return np.sqrt(((kappa * self.lens.total_mass.to(u.solMass)) * self.pi_rel.to(u.mas)))
@property def theta_E(self): '\n *astropy.Quantity*, read-only\n\n The angular Einstein Radius in milliarcseconds.\n ' kappa = ((4.0 * G) / ((c ** 2) * au)).to((u.mas / u.Msun), equivalencies=u.dimensionless_angles()) return np.sqrt(((kappa * self.lens.total_mass.to(u.solMass)) * self.pi_rel.to(u.mas)))<|docstring|>*astropy.Quantity*, read-only The angular Einstein Radius in milliarcseconds.<|endoftext|>
38dacf678a4450dc3e6dcbdd1aaee2f13fae265605472e2c7154cbb5203cdfe1
@property def r_E(self): '\n *astropy.Quantity*, read-only\n\n The physical size of the Einstein Radius in the Lens plane (in AU).\n ' return (self.lens.distance * self.theta_E.to('', equivalencies=u.dimensionless_angles())).to(u.au)
*astropy.Quantity*, read-only The physical size of the Einstein Radius in the Lens plane (in AU).
source/MulensModel/mulensobjects/mulenssystem.py
r_E
KKruszynska/MulensModel
30
python
@property def r_E(self): '\n *astropy.Quantity*, read-only\n\n The physical size of the Einstein Radius in the Lens plane (in AU).\n ' return (self.lens.distance * self.theta_E.to('', equivalencies=u.dimensionless_angles())).to(u.au)
@property def r_E(self): '\n *astropy.Quantity*, read-only\n\n The physical size of the Einstein Radius in the Lens plane (in AU).\n ' return (self.lens.distance * self.theta_E.to('', equivalencies=u.dimensionless_angles())).to(u.au)<|docstring|>*astropy.Quantity*, read-only The physical size of the Einstein Radius in the Lens plane (in AU).<|endoftext|>
d2efe5a83439b7da946175aea169926a9cf7b4cb3ec3647696d388412b496580
@property def r_E_tilde(self): '\n *astropy.Quantity*, read-only\n\n The physical size of the Einstein Radius projected onto the\n Observer plane (in AU).\n ' return ((self.r_E * self.source.distance) / (self.source.distance - self.lens.distance))
*astropy.Quantity*, read-only The physical size of the Einstein Radius projected onto the Observer plane (in AU).
source/MulensModel/mulensobjects/mulenssystem.py
r_E_tilde
KKruszynska/MulensModel
30
python
@property def r_E_tilde(self): '\n *astropy.Quantity*, read-only\n\n The physical size of the Einstein Radius projected onto the\n Observer plane (in AU).\n ' return ((self.r_E * self.source.distance) / (self.source.distance - self.lens.distance))
@property def r_E_tilde(self): '\n *astropy.Quantity*, read-only\n\n The physical size of the Einstein Radius projected onto the\n Observer plane (in AU).\n ' return ((self.r_E * self.source.distance) / (self.source.distance - self.lens.distance))<|docstring|>*astropy.Quantity*, read-only The physical size of the Einstein Radius projected onto the Observer plane (in AU).<|endoftext|>
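The read-only properties above (pi_rel, pi_E, theta_E, r_E, r_E_tilde) chain together; the following sketch reproduces the same arithmetic with plain astropy quantities. The mass, distances, and proper motion are made-up illustration values, not taken from the source.

import numpy as np
import astropy.units as u
from astropy.constants import G, c, au

M = 0.5 * u.solMass                  # lens mass (assumed value)
D_L, D_S = 4 * u.kpc, 8 * u.kpc      # lens and source distances (assumed)
mu_rel = 5 * u.mas / u.yr            # relative proper motion (assumed)

# parallaxes of lens and source, then their difference (pi_rel)
pi_L = (1 * u.au / D_L).to(u.mas, equivalencies=u.dimensionless_angles())
pi_S = (1 * u.au / D_S).to(u.mas, equivalencies=u.dimensionless_angles())
pi_rel = pi_L - pi_S

kappa = (4 * G / (c ** 2 * au)).to(u.mas / u.Msun,
                                   equivalencies=u.dimensionless_angles())
theta_E = np.sqrt(kappa * M * pi_rel)            # angular Einstein radius
pi_E = (pi_rel / theta_E).decompose().value      # microlensing parallax
t_E = (theta_E / mu_rel).to(u.day)               # Einstein crossing time
r_E = (D_L * theta_E.to('', equivalencies=u.dimensionless_angles())).to(u.au)
r_E_tilde = r_E * D_S / (D_S - D_L)              # projected onto observer plane
print(theta_E, pi_E, t_E, r_E, r_E_tilde)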
e76ffb0c6eafb552a68238243f910bb03e3601f3bf01b0c7b1ca33247a0acc22
def plot_magnification(self, u_0=None, alpha=None, **kwargs): '\n Plot the magnification curve for the lens. u_0 must always be\n specified. If the lens has more than one body, alpha must also\n be specified.\n\n Parameters :\n u_0: *float*\n Impact parameter between the source and the lens (as a\n fraction of the Einstein ring)\n\n alpha: *astropy.Quantity*, *float*\n If *float* then degrees are assumed as a unit.\n See :py:obj:`MulensModel.modelparameters.ModelParameters.alpha`\n\n ``**kwargs``:\n See :py:func:`MulensModel.model.Model.plot_magnification()`\n ' if (u_0 is None): raise AttributeError('u_0 is required') else: parameters = {'t_0': 0.0, 'u_0': u_0} if (self.t_E is not None): parameters['t_E'] = self.t_E xtitle = 'Time (days)' else: parameters['t_E'] = 1.0 xtitle = 'Time (tE)' if (self.source.angular_radius is not None): parameters['rho'] = (self.source.angular_radius.to(u.mas) / self.theta_E.to(u.mas)) if (self.lens.n_masses > 1): parameters['q'] = self.lens.q parameters['s'] = self.lens.s if (alpha is None): raise AttributeError('alpha is required for 2-body lenses.') else: parameters['alpha'] = alpha model = Model(parameters=parameters) model.plot_magnification(**kwargs) plt.xlabel(xtitle)
Plot the magnification curve for the lens. u_0 must always be specified. If the lens has more than one body, alpha must also be specified. Parameters : u_0: *float* Impact parameter between the source and the lens (as a fraction of the Einstein ring) alpha: *astropy.Quantity*, *float* If *float* then degrees are assumed as a unit. See :py:obj:`MulensModel.modelparameters.ModelParameters.alpha` ``**kwargs``: See :py:func:`MulensModel.model.Model.plot_magnification()`
source/MulensModel/mulensobjects/mulenssystem.py
plot_magnification
KKruszynska/MulensModel
30
python
def plot_magnification(self, u_0=None, alpha=None, **kwargs): '\n Plot the magnification curve for the lens. u_0 must always be\n specified. If the lens has more than one body, alpha must also\n be specified.\n\n Parameters :\n u_0: *float*\n Impact parameter between the source and the lens (as a\n fraction of the Einstein ring)\n\n alpha: *astropy.Quantity*, *float*\n If *float* then degrees are assumed as a unit.\n See :py:obj:`MulensModel.modelparameters.ModelParameters.alpha`\n\n ``**kwargs``:\n See :py:func:`MulensModel.model.Model.plot_magnification()`\n ' if (u_0 is None): raise AttributeError('u_0 is required') else: parameters = {'t_0': 0.0, 'u_0': u_0} if (self.t_E is not None): parameters['t_E'] = self.t_E xtitle = 'Time (days)' else: parameters['t_E'] = 1.0 xtitle = 'Time (tE)' if (self.source.angular_radius is not None): parameters['rho'] = (self.source.angular_radius.to(u.mas) / self.theta_E.to(u.mas)) if (self.lens.n_masses > 1): parameters['q'] = self.lens.q parameters['s'] = self.lens.s if (alpha is None): raise AttributeError('alpha is required for 2-body lenses.') else: parameters['alpha'] = alpha model = Model(parameters=parameters) model.plot_magnification(**kwargs) plt.xlabel(xtitle)
def plot_magnification(self, u_0=None, alpha=None, **kwargs): '\n Plot the magnification curve for the lens. u_0 must always be\n specified. If the lens has more than one body, alpha must also\n be specified.\n\n Parameters :\n u_0: *float*\n Impact parameter between the source and the lens (as a\n fraction of the Einstein ring)\n\n alpha: *astropy.Quantity*, *float*\n If *float* then degrees are assumed as a unit.\n See :py:obj:`MulensModel.modelparameters.ModelParameters.alpha`\n\n ``**kwargs``:\n See :py:func:`MulensModel.model.Model.plot_magnification()`\n ' if (u_0 is None): raise AttributeError('u_0 is required') else: parameters = {'t_0': 0.0, 'u_0': u_0} if (self.t_E is not None): parameters['t_E'] = self.t_E xtitle = 'Time (days)' else: parameters['t_E'] = 1.0 xtitle = 'Time (tE)' if (self.source.angular_radius is not None): parameters['rho'] = (self.source.angular_radius.to(u.mas) / self.theta_E.to(u.mas)) if (self.lens.n_masses > 1): parameters['q'] = self.lens.q parameters['s'] = self.lens.s if (alpha is None): raise AttributeError('alpha is required for 2-body lenses.') else: parameters['alpha'] = alpha model = Model(parameters=parameters) model.plot_magnification(**kwargs) plt.xlabel(xtitle)<|docstring|>Plot the magnification curve for the lens. u_0 must always be specified. If the lens has more than one body, alpha must also be specified. Parameters : u_0: *float* Impact parameter between the source and the lens (as a fraction of the Einstein ring) alpha: *astropy.Quantity*, *float* If *float* then degrees are assumed as a unit. See :py:obj:`MulensModel.modelparameters.ModelParameters.alpha` ``**kwargs``: See :py:func:`MulensModel.model.Model.plot_magnification()`<|endoftext|>
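A hedged usage sketch of the call chain that plot_magnification builds internally (a Model constructed from a parameters dict, then its plot_magnification method); the parameter values and the t_range keyword are illustrative assumptions, not taken from the source.

import matplotlib.pyplot as plt
from MulensModel import Model

model = Model(parameters={'t_0': 0.0, 'u_0': 0.1, 't_E': 20.0})
model.plot_magnification(t_range=[-40.0, 40.0])  # kwargs forwarded as above
plt.xlabel('Time (days)')
plt.show()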
9df4ae4d24148795d4fa65b2bce343d58415aced7ce0ccdfeef8897d2241dc4c
def plot_caustics(self, n_points=5000, **kwargs): '\n Plot the caustics structure using `Pyplot scatter`_. See\n :py:func:`MulensModel.caustics.Caustics.plot()`\n\n Parameters :\n n_points: *int*\n Number of points to be plotted.\n\n ``**kwargs``:\n Keyword arguments passed to `Pyplot scatter`\n\n .. _Pyplot scatter:\n https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter\n\n ' self.lens.plot_caustics(n_points=n_points, **kwargs)
Plot the caustics structure using `Pyplot scatter`_. See :py:func:`MulensModel.caustics.Caustics.plot()` Parameters : n_points: *int* Number of points to be plotted. ``**kwargs``: Keyword arguments passed to `Pyplot scatter` .. _Pyplot scatter: https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter
source/MulensModel/mulensobjects/mulenssystem.py
plot_caustics
KKruszynska/MulensModel
30
python
def plot_caustics(self, n_points=5000, **kwargs): '\n Plot the caustics structure using `Pyplot scatter`_. See\n :py:func:`MulensModel.caustics.Caustics.plot()`\n\n Parameters :\n n_points: *int*\n Number of points to be plotted.\n\n ``**kwargs``:\n Keyword arguments passed to `Pyplot scatter`\n\n .. _Pyplot scatter:\n https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter\n\n ' self.lens.plot_caustics(n_points=n_points, **kwargs)
def plot_caustics(self, n_points=5000, **kwargs): '\n Plot the caustics structure using `Pyplot scatter`_. See\n :py:func:`MulensModel.caustics.Caustics.plot()`\n\n Parameters :\n n_points: *int*\n Number of points to be plotted.\n\n ``**kwargs``:\n Keyword arguments passed to `Pyplot scatter`\n\n .. _Pyplot scatter:\n https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter\n\n ' self.lens.plot_caustics(n_points=n_points, **kwargs)<|docstring|>Plot the caustics structure using `Pyplot scatter`_. See :py:func:`MulensModel.caustics.Caustics.plot()` Parameters : n_points: *int* Number of points to be plotted. ``**kwargs``: Keyword arguments passed to `Pyplot scatter` .. _Pyplot scatter: https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.scatter<|endoftext|>
66b0f2b52140a442172884fbb71bd1c51194aa4ab6f7a7d565bd51eed68ad1d2
@property def html_content(self): '\n Generate HTML representation of the markdown-formatted blog entry,\n and also convert any media URLs into rich media objects such as video\n players or images.\n ' hilite = CodeHiliteExtension(linenums=False, css_class='highlight') extras = ExtraExtension() markdown_content = markdown(self.content, extensions=[hilite, extras]) oembed_content = parse_html(markdown_content, oembed_providers, urlize_all=True, maxwidth=app.config['SITE_WIDTH']) return Markup(oembed_content)
Generate HTML representation of the markdown-formatted blog entry, and also convert any media URLs into rich media objects such as video players or images.
presenter/models.py
html_content
dkkline/CanSat14-15
0
python
@property def html_content(self): '\n Generate HTML representation of the markdown-formatted blog entry,\n and also convert any media URLs into rich media objects such as video\n players or images.\n ' hilite = CodeHiliteExtension(linenums=False, css_class='highlight') extras = ExtraExtension() markdown_content = markdown(self.content, extensions=[hilite, extras]) oembed_content = parse_html(markdown_content, oembed_providers, urlize_all=True, maxwidth=app.config['SITE_WIDTH']) return Markup(oembed_content)
@property def html_content(self): '\n Generate HTML representation of the markdown-formatted blog entry,\n and also convert any media URLs into rich media objects such as video\n players or images.\n ' hilite = CodeHiliteExtension(linenums=False, css_class='highlight') extras = ExtraExtension() markdown_content = markdown(self.content, extensions=[hilite, extras]) oembed_content = parse_html(markdown_content, oembed_providers, urlize_all=True, maxwidth=app.config['SITE_WIDTH']) return Markup(oembed_content)<|docstring|>Generate HTML representation of the markdown-formatted blog entry, and also convert any media URLs into rich media objects such as video players or images.<|endoftext|>
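html_content wires together three libraries; here is a self-contained sketch of the same pipeline, assuming the markdown, micawber, and markupsafe packages, with app.config['SITE_WIDTH'] replaced by a plain argument.

from markdown import markdown
from markdown.extensions.codehilite import CodeHiliteExtension
from markdown.extensions.extra import ExtraExtension
from micawber import bootstrap_basic, parse_html
from markupsafe import Markup

oembed_providers = bootstrap_basic()   # registry of common oEmbed providers

def render(content, site_width=800):
    html = markdown(content,
                    extensions=[CodeHiliteExtension(linenums=False,
                                                    css_class='highlight'),
                                ExtraExtension()])
    # replace bare media URLs with rich embeds (players, images, ...)
    html = parse_html(html, oembed_providers,
                      urlize_all=True, maxwidth=site_width)
    return Markup(html)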
4ae27ecabe6ae908f3b76fbad0b38fca1301d0f4d3e2563305f84f0c022cdb10
def save(self, *args, **kwargs): '\n Saves the entry to the database.\n ' if (not self.slug): self.slug = re.sub('[^\\w)]+', '-', self.title.lower()) ret = super(Entry, self).save(*args, **kwargs) return ret
Saves the entry to the database.
presenter/models.py
save
dkkline/CanSat14-15
0
python
def save(self, *args, **kwargs): '\n \n ' if (not self.slug): self.slug = re.sub('[^\\w)]+', '-', self.title.lower()) ret = super(Entry, self).save(*args, **kwargs) return ret
def save(self, *args, **kwargs): '\n \n ' if (not self.slug): self.slug = re.sub('[^\\w)]+', '-', self.title.lower()) ret = super(Entry, self).save(*args, **kwargs) return ret<|docstring|>Saves the entry to the database.<|endoftext|>
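The slug in save() comes from a single re.sub; a quick demo of that pattern, including its quirks of keeping ')' and leaving a trailing dash after punctuation (the title string is made up):

import re

title = 'My First Post!'
print(re.sub('[^\\w)]+', '-', title.lower()))   # -> 'my-first-post-'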
53c63668524a227f4b2ace17c2d6b430c533a8ff000dfa8974438ad1422a2520
@classmethod def public(cls): '\n Returns the published entries.\n ' return Entry.select().where((Entry.published == True))
Returns the published entries.
presenter/models.py
public
dkkline/CanSat14-15
0
python
@classmethod def public(cls): '\n \n ' return Entry.select().where((Entry.published == True))
@classmethod def public(cls): '\n \n ' return Entry.select().where((Entry.published == True))<|docstring|>Returns the published entries.<|endoftext|>
aad54caa502e7ae20ed5198316f3182fdbfc8eabf91e347a9b25dd4ec1aed00e
@classmethod def draft(cls): '\n Returns the drafts among the entries.\n ' return Entry.select().where((Entry.published == False))
Returns the drafts among the entries.
presenter/models.py
draft
dkkline/CanSat14-15
0
python
@classmethod def draft(cls): '\n \n ' return Entry.select().where((Entry.published == False))
@classmethod def draft(cls): '\n \n ' return Entry.select().where((Entry.published == False))<|docstring|>Returns the drafts among the entries.<|endoftext|>
62c1eb75a50a967f632c01b8b6d6fe305d830b45ec86e6252e1f1531d232efe1
def test_cyclic_core_recursion(): 'Two cyclic cores, in orthogonal subspaces.' fol = _fol.Context() fol.declare(x=(0, 1), y=(0, 1), z=(0, 1), u=(0, 1), v=(0, 1), w=(0, 1)) s = '\n (\n \\/ (z = 1 /\\ y = 0)\n \\/ (x = 0 /\\ z = 1)\n \\/ (y = 1 /\\ x = 0)\n \\/ (y = 1 /\\ z = 0)\n \\/ (x = 1 /\\ z = 0)\n \\/ (x = 1 /\\ y = 0)\n ) \\/\n (\n \\/ (w = 1 /\\ v = 0)\n \\/ (u = 0 /\\ w = 1)\n \\/ (v = 1 /\\ u = 0)\n \\/ (v = 1 /\\ w = 0)\n \\/ (u = 1 /\\ w = 0)\n \\/ (u = 1 /\\ v = 0)\n )\n ' f = fol.add_expr(s) care_set = fol.true cover = cov.minimize(f, care_set, fol) n = fol.count(cover) assert (n == 6), n
Two cyclic cores, in orthogonal subspaces.
tests/cover_test.py
test_cyclic_core_recursion
tulip-control/omega
24
python
def test_cyclic_core_recursion(): fol = _fol.Context() fol.declare(x=(0, 1), y=(0, 1), z=(0, 1), u=(0, 1), v=(0, 1), w=(0, 1)) s = '\n (\n \\/ (z = 1 /\\ y = 0)\n \\/ (x = 0 /\\ z = 1)\n \\/ (y = 1 /\\ x = 0)\n \\/ (y = 1 /\\ z = 0)\n \\/ (x = 1 /\\ z = 0)\n \\/ (x = 1 /\\ y = 0)\n ) \\/\n (\n \\/ (w = 1 /\\ v = 0)\n \\/ (u = 0 /\\ w = 1)\n \\/ (v = 1 /\\ u = 0)\n \\/ (v = 1 /\\ w = 0)\n \\/ (u = 1 /\\ w = 0)\n \\/ (u = 1 /\\ v = 0)\n )\n ' f = fol.add_expr(s) care_set = fol.true cover = cov.minimize(f, care_set, fol) n = fol.count(cover) assert (n == 6), n
def test_cyclic_core_recursion(): fol = _fol.Context() fol.declare(x=(0, 1), y=(0, 1), z=(0, 1), u=(0, 1), v=(0, 1), w=(0, 1)) s = '\n (\n \\/ (z = 1 /\\ y = 0)\n \\/ (x = 0 /\\ z = 1)\n \\/ (y = 1 /\\ x = 0)\n \\/ (y = 1 /\\ z = 0)\n \\/ (x = 1 /\\ z = 0)\n \\/ (x = 1 /\\ y = 0)\n ) \\/\n (\n \\/ (w = 1 /\\ v = 0)\n \\/ (u = 0 /\\ w = 1)\n \\/ (v = 1 /\\ u = 0)\n \\/ (v = 1 /\\ w = 0)\n \\/ (u = 1 /\\ w = 0)\n \\/ (u = 1 /\\ v = 0)\n )\n ' f = fol.add_expr(s) care_set = fol.true cover = cov.minimize(f, care_set, fol) n = fol.count(cover) assert (n == 6), n<|docstring|>Two cyclic cores, in orthogonal subspaces.<|endoftext|>
53590c36379a0340ce8406dc83273e852fc3a923d6b8a0c0e86698e941583463
def test_needs_unfloors(): 'Floors shrinks both primes to one smaller implicant.\n\n The returned cover is a minimal cover, so the\n assertion `_covers` in the function `cover.minimize` passes.\n However, the returned cover is not made of primes from\n the set `y` computed by calling `prime_implicants`.\n\n Finding the primes takes into account the care set.\n The resulting covering problem is such that shrinking happens.\n Therefore, unfloors is necessary in this problem.\n ' fol = _fol.Context() fol.declare(x=(0, 1), y=(0, 1)) f = fol.add_expr('x = 0 /\\ y = 0') care = fol.add_expr('\n \\/ (x = 0 /\\ y = 0)\n \\/ (x = 1 /\\ y = 1)\n ') cover = cov.minimize(f, care, fol) implicants = list(fol.pick_iter(cover)) assert (len(implicants) == 1), implicants (d,) = implicants d_1 = dict(a_x=0, b_x=1, a_y=0, b_y=0) d_2 = dict(a_x=0, b_x=0, a_y=0, b_y=1) assert ((d == d_1) or (d == d_2)), d
Floors shrinks both primes to one smaller implicant. The returned cover is a minimal cover, so the assertion `_covers` in the function `cover.minimize` passes. However, the returned cover is not made of primes from the set `y` computed by calling `prime_implicants`. Finding the primes takes into account the care set. The resulting covering problem is such that shrinking happens. Therefore, unfloors is necessary in this problem.
tests/cover_test.py
test_needs_unfloors
tulip-control/omega
24
python
def test_needs_unfloors(): 'Floors shrinks both primes to one smaller implicant.\n\n The returned cover is a minimal cover, so the\n assertion `_covers` in the function `cover.minimize` passes.\n However, the returned cover is not made of primes from\n the set `y` computed by calling `prime_implicants`.\n\n Finding the primes takes into account the care set.\n The resulting covering problem is such that shrinking happens.\n Therefore, unfloors is necessary in this problem.\n ' fol = _fol.Context() fol.declare(x=(0, 1), y=(0, 1)) f = fol.add_expr('x = 0 /\\ y = 0') care = fol.add_expr('\n \\/ (x = 0 /\\ y = 0)\n \\/ (x = 1 /\\ y = 1)\n ') cover = cov.minimize(f, care, fol) implicants = list(fol.pick_iter(cover)) assert (len(implicants) == 1), implicants (d,) = implicants d_1 = dict(a_x=0, b_x=1, a_y=0, b_y=0) d_2 = dict(a_x=0, b_x=0, a_y=0, b_y=1) assert ((d == d_1) or (d == d_2)), d
def test_needs_unfloors(): 'Floors shrinks both primes to one smaller implicant.\n\n The returned cover is a minimal cover, so the\n assertion `_covers` in the function `cover.minimize` passes.\n However, the returned cover is not made of primes from\n the set `y` computed by calling `prime_implicants`.\n\n Finding the primes takes into account the care set.\n The resulting covering problem is such that shrinking happens.\n Therefore, unfloors is necessary in this problem.\n ' fol = _fol.Context() fol.declare(x=(0, 1), y=(0, 1)) f = fol.add_expr('x = 0 /\\ y = 0') care = fol.add_expr('\n \\/ (x = 0 /\\ y = 0)\n \\/ (x = 1 /\\ y = 1)\n ') cover = cov.minimize(f, care, fol) implicants = list(fol.pick_iter(cover)) assert (len(implicants) == 1), implicants (d,) = implicants d_1 = dict(a_x=0, b_x=1, a_y=0, b_y=0) d_2 = dict(a_x=0, b_x=0, a_y=0, b_y=1) assert ((d == d_1) or (d == d_2)), d<|docstring|>Floors shrinks both primes to one smaller implicant. The returned cover is a minimal cover, so the assertion `_covers` in the function `cover.minimize` passes. However, the returned cover is not made of primes from the set `y` computed by calling `prime_implicants`. Finding the primes takes into account the care set. The resulting covering problem is such that shrinking happens. Therefore, unfloors is necessary in this problem.<|endoftext|>
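The unfloors argument above can be checked by brute force without the omega library; this sketch enumerates the four assignments directly, using the implicant corresponding to d_1 (x in 0..1 and y = 0).

points = {(x, y) for x in (0, 1) for y in (0, 1)}
f = {(0, 0)}                      # x = 0 /\ y = 0
care = {(0, 0), (1, 1)}
implicant = {(x, y) for (x, y) in points if y == 0}   # d_1: 0 <= x <= 1, y = 0
assert f <= implicant             # the single implicant covers f
assert (implicant & care) <= f    # and adds nothing inside the care set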
a6c20084e8a90f83da1fdbfacf8e266e5291e42cd0a0f175c5f879128af0c011
def robots_example(fol): "Return cooperative winning set from ACC'16 example." c = ['(x = 0) /\\ (y = 4)', '(x = 0) /\\ (y = 5)', '(x = 0) /\\ (y = 2)', '(x = 0) /\\ (y = 3)', '(x = 0) /\\ (y = 6)', '(x = 0) /\\ (y = 7)', '(x = 1) /\\ (y = 0)', '(x = 1) /\\ (y = 2)', '(x = 1) /\\ (y = 4)', '(x = 1) /\\ (y = 6)', '(x = 1) /\\ (y = 5)', '(x = 1) /\\ (y = 3)', '(x = 1) /\\ (y = 7)', '(x = 2) /\\ (y = 0)', '(x = 2) /\\ (y = 1)', '(x = 2) /\\ (y = 6)', '(x = 2) /\\ (y = 7)', '(x = 3) /\\ (y = 0)', '(x = 3) /\\ (y = 2)', '(x = 3) /\\ (y = 6)', '(x = 3) /\\ (y = 1)', '(x = 3) /\\ (y = 7)', '(x = 4) /\\ (y = 0)', '(x = 4) /\\ (y = 1)', '(x = 4) /\\ (y = 2)', '(x = 4) /\\ (y = 3)', '(x = 4) /\\ (y = 6)', '(x = 4) /\\ (y = 7)', '(x = 5) /\\ (y = 0)', '(x = 5) /\\ (y = 2)', '(x = 5) /\\ (y = 4)', '(x = 5) /\\ (y = 6)', '(x = 5) /\\ (y = 1)', '(x = 5) /\\ (y = 3)', '(x = 5) /\\ (y = 7)'] s = stx.disj(c) u = fol.add_expr(s) return u
Return cooperative winning set from ACC'16 example.
tests/cover_test.py
robots_example
tulip-control/omega
24
python
def robots_example(fol): c = ['(x = 0) /\\ (y = 4)', '(x = 0) /\\ (y = 5)', '(x = 0) /\\ (y = 2)', '(x = 0) /\\ (y = 3)', '(x = 0) /\\ (y = 6)', '(x = 0) /\\ (y = 7)', '(x = 1) /\\ (y = 0)', '(x = 1) /\\ (y = 2)', '(x = 1) /\\ (y = 4)', '(x = 1) /\\ (y = 6)', '(x = 1) /\\ (y = 5)', '(x = 1) /\\ (y = 3)', '(x = 1) /\\ (y = 7)', '(x = 2) /\\ (y = 0)', '(x = 2) /\\ (y = 1)', '(x = 2) /\\ (y = 6)', '(x = 2) /\\ (y = 7)', '(x = 3) /\\ (y = 0)', '(x = 3) /\\ (y = 2)', '(x = 3) /\\ (y = 6)', '(x = 3) /\\ (y = 1)', '(x = 3) /\\ (y = 7)', '(x = 4) /\\ (y = 0)', '(x = 4) /\\ (y = 1)', '(x = 4) /\\ (y = 2)', '(x = 4) /\\ (y = 3)', '(x = 4) /\\ (y = 6)', '(x = 4) /\\ (y = 7)', '(x = 5) /\\ (y = 0)', '(x = 5) /\\ (y = 2)', '(x = 5) /\\ (y = 4)', '(x = 5) /\\ (y = 6)', '(x = 5) /\\ (y = 1)', '(x = 5) /\\ (y = 3)', '(x = 5) /\\ (y = 7)'] s = stx.disj(c) u = fol.add_expr(s) return u
def robots_example(fol): c = ['(x = 0) /\\ (y = 4)', '(x = 0) /\\ (y = 5)', '(x = 0) /\\ (y = 2)', '(x = 0) /\\ (y = 3)', '(x = 0) /\\ (y = 6)', '(x = 0) /\\ (y = 7)', '(x = 1) /\\ (y = 0)', '(x = 1) /\\ (y = 2)', '(x = 1) /\\ (y = 4)', '(x = 1) /\\ (y = 6)', '(x = 1) /\\ (y = 5)', '(x = 1) /\\ (y = 3)', '(x = 1) /\\ (y = 7)', '(x = 2) /\\ (y = 0)', '(x = 2) /\\ (y = 1)', '(x = 2) /\\ (y = 6)', '(x = 2) /\\ (y = 7)', '(x = 3) /\\ (y = 0)', '(x = 3) /\\ (y = 2)', '(x = 3) /\\ (y = 6)', '(x = 3) /\\ (y = 1)', '(x = 3) /\\ (y = 7)', '(x = 4) /\\ (y = 0)', '(x = 4) /\\ (y = 1)', '(x = 4) /\\ (y = 2)', '(x = 4) /\\ (y = 3)', '(x = 4) /\\ (y = 6)', '(x = 4) /\\ (y = 7)', '(x = 5) /\\ (y = 0)', '(x = 5) /\\ (y = 2)', '(x = 5) /\\ (y = 4)', '(x = 5) /\\ (y = 6)', '(x = 5) /\\ (y = 1)', '(x = 5) /\\ (y = 3)', '(x = 5) /\\ (y = 7)'] s = stx.disj(c) u = fol.add_expr(s) return u<|docstring|>Return cooperative winning set from ACC'16 example.<|endoftext|>
6fa1cf7dfc26c6bf7b968b9bf1872e6ff29b9e2007a033626d329ce14493f1b6
def register(pluginFn): '\n Register commands for plugin\n @param pluginFn (MFnPlugin): plugin object passed to initializePlugin\n ' pluginFn.registerCommand(importCmd.kCmdName, importCmd.creator, importCmd.syntaxCreator) pluginFn.registerCommand(exportCmd.kCmdName, exportCmd.creator, exportCmd.syntaxCreator) return
Register commands for plugin @param pluginFn (MFnPlugin): plugin object passed to initializePlugin
CA/Assets/FbxExporters/Integrations/Autodesk/maya/scripts/UnityFbxForMaya/commands.py
register
Bartlett-RC3/skilling-module-1-peljevic
0
python
def register(pluginFn): '\n Register commands for plugin\n @param pluginFn (MFnPlugin): plugin object passed to initializePlugin\n ' pluginFn.registerCommand(importCmd.kCmdName, importCmd.creator, importCmd.syntaxCreator) pluginFn.registerCommand(exportCmd.kCmdName, exportCmd.creator, exportCmd.syntaxCreator) return
def register(pluginFn): '\n Register commands for plugin\n @param pluginFn (MFnPlugin): plugin object passed to initializePlugin\n ' pluginFn.registerCommand(importCmd.kCmdName, importCmd.creator, importCmd.syntaxCreator) pluginFn.registerCommand(exportCmd.kCmdName, exportCmd.creator, exportCmd.syntaxCreator) return<|docstring|>Register commands for plugin @param pluginFn (MFnPlugin): plugin object passed to initializePlugin<|endoftext|>
7e1b0268f175684daab0860e99a385682d580de117989b2f25f85c9d187803d5
def unregister(pluginFn): '\n Unregister commands for plugin\n @param pluginFn (MFnPlugin): plugin object passed to uninitializePlugin\n ' pluginFn.deregisterCommand(importCmd.kCmdName) pluginFn.deregisterCommand(exportCmd.kCmdName) return
Unregister commands for plugin @param pluginFn (MFnPlugin): plugin object passed to uninitializePlugin
CA/Assets/FbxExporters/Integrations/Autodesk/maya/scripts/UnityFbxForMaya/commands.py
unregister
Bartlett-RC3/skilling-module-1-peljevic
0
python
def unregister(pluginFn): '\n Unregister commands for plugin\n @param pluginFn (MFnPlugin): plugin object passed to uninitializePlugin\n ' pluginFn.deregisterCommand(importCmd.kCmdName) pluginFn.deregisterCommand(exportCmd.kCmdName) return
def unregister(pluginFn): '\n Unregister commands for plugin\n @param pluginFn (MFnPlugin): plugin object passed to uninitializePlugin\n ' pluginFn.deregisterCommand(importCmd.kCmdName) pluginFn.deregisterCommand(exportCmd.kCmdName) return<|docstring|>Unregister commands for plugin @param pluginFn (MFnPlugin): plugin object passed to uninitializePlugin<|endoftext|>
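register/unregister are meant to be called from the Maya plugin entry points; a minimal sketch of that wiring, assuming the legacy OpenMayaMPx API commonly used by this style of plugin (the entry-point names are fixed by Maya, the rest is illustrative).

import maya.OpenMayaMPx as OpenMayaMPx

def initializePlugin(mobject):
    pluginFn = OpenMayaMPx.MFnPlugin(mobject)
    register(pluginFn)      # registers the import and export commands

def uninitializePlugin(mobject):
    pluginFn = OpenMayaMPx.MFnPlugin(mobject)
    unregister(pluginFn)    # deregisters them on plugin unload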
18bb7452e18d659fa8a332fec03370b6ff1e398eecd38735863dcdbd641df169
def loadUnityFbxExportSettings(self): '\n Load the Export Settings from file\n ' projectPath = maya.cmds.optionVar(q='UnityProject') fileName = os.path.join(projectPath, 'Assets', maya.cmds.optionVar(q='UnityFbxExportSettings')) if (not os.path.isfile(fileName)): maya.cmds.error('Failed to find Unity Fbx Export Settings at: {0}'.format(fileName)) return False with open(fileName) as f: contents = f.read() maya.mel.eval(contents) return True
Load the Export Settings from file
CA/Assets/FbxExporters/Integrations/Autodesk/maya/scripts/UnityFbxForMaya/commands.py
loadUnityFbxExportSettings
Bartlett-RC3/skilling-module-1-peljevic
0
python
def loadUnityFbxExportSettings(self): '\n \n ' projectPath = maya.cmds.optionVar(q='UnityProject') fileName = os.path.join(projectPath, 'Assets', maya.cmds.optionVar(q='UnityFbxExportSettings')) if (not os.path.isfile(fileName)): maya.cmds.error('Failed to find Unity Fbx Export Settings at: {0}'.format(fileName)) return False with open(fileName) as f: contents = f.read() maya.mel.eval(contents) return True
def loadUnityFbxExportSettings(self): '\n \n ' projectPath = maya.cmds.optionVar(q='UnityProject') fileName = os.path.join(projectPath, 'Assets', maya.cmds.optionVar(q='UnityFbxExportSettings')) if (not os.path.isfile(fileName)): maya.cmds.error('Failed to find Unity Fbx Export Settings at: {0}'.format(fileName)) return False with open(fileName) as f: contents = f.read() maya.mel.eval(contents) return True<|docstring|>Load the Export Settings from file<|endoftext|>
54cf0c3c12beffd26e77f2806416bf7b91fd01900e98e8782329f6a6195be158
@classmethod def invoke(cls): '\n Invoke command using mel so that it is executed and logged to the script editor log\n @return: void\n ' strCmd = '{0};'.format(cls.kCmdName) maya.mel.eval(strCmd)
Invoke command using mel so that it is executed and logged to the script editor log @return: void
CA/Assets/FbxExporters/Integrations/Autodesk/maya/scripts/UnityFbxForMaya/commands.py
invoke
Bartlett-RC3/skilling-module-1-peljevic
0
python
@classmethod def invoke(cls): '\n Invoke command using mel so that it is executed and logged to the script editor log\n @return: void\n ' strCmd = '{0};'.format(cls.kCmdName) maya.mel.eval(strCmd)
@classmethod def invoke(cls): '\n Invoke command using mel so that it is executed and logged to the script editor log\n @return: void\n ' strCmd = '{0};'.format(cls.kCmdName) maya.mel.eval(strCmd)<|docstring|>Invoke command using mel so that it is executed and logged to the script editor log @return: void<|endoftext|>
013018384eac7a2befec8537effd99336d12db26d99c78d9c0e8d1871615bcc9
@classmethod def invoke(cls): '\n Invoke command using mel so that it is executed and logged to the script editor log\n @return: void\n ' strCmd = '{0};'.format(cls.kCmdName) maya.mel.eval(strCmd)
Invoke command using mel so that it is executed and logged to the script editor log @return: void
CA/Assets/FbxExporters/Integrations/Autodesk/maya/scripts/UnityFbxForMaya/commands.py
invoke
Bartlett-RC3/skilling-module-1-peljevic
0
python
@classmethod def invoke(cls): '\n Invoke command using mel so that it is executed and logged to the script editor log\n @return: void\n ' strCmd = '{0};'.format(cls.kCmdName) maya.mel.eval(strCmd)
@classmethod def invoke(cls): '\n Invoke command using mel so that it is executed and logged to the script editor log\n @return: void\n ' strCmd = '{0};'.format(cls.kCmdName) maya.mel.eval(strCmd)<|docstring|>Invoke command using mel so that it is executed and logged to the script editor log @return: void<|endoftext|>
19a759e9bf4a80a0c02958b2c0c1e9802b9ec2da891767e380087c53cd0e468a
def __init__(self, logger, dp, stack, tunnel_acls, acl_manager, **kwargs): '\n Initialize variables and set up peer distances\n\n Args:\n stack (Stack): Stack object of the DP on the Valve being managed\n ' self.logger = logger self.dp = dp self.stack = stack self.tunnel_acls = tunnel_acls self.acl_manager = acl_manager self.towards_root_ports = None self.chosen_towards_ports = None self.chosen_towards_port = None self.away_ports = None self.inactive_away_ports = None self.pruned_away_ports = None self.reset_peer_distances()
Initialize variables and set up peer distances Args: stack (Stack): Stack object of the DP on the Valve being managed
faucet/valve_stack.py
__init__
pbatta/faucet
0
python
def __init__(self, logger, dp, stack, tunnel_acls, acl_manager, **kwargs): '\n Initialize variables and set up peer distances\n\n Args:\n stack (Stack): Stack object of the DP on the Valve being managed\n ' self.logger = logger self.dp = dp self.stack = stack self.tunnel_acls = tunnel_acls self.acl_manager = acl_manager self.towards_root_ports = None self.chosen_towards_ports = None self.chosen_towards_port = None self.away_ports = None self.inactive_away_ports = None self.pruned_away_ports = None self.reset_peer_distances()
def __init__(self, logger, dp, stack, tunnel_acls, acl_manager, **kwargs): '\n Initialize variables and set up peer distances\n\n Args:\n stack (Stack): Stack object of the DP on the Valve being managed\n ' self.logger = logger self.dp = dp self.stack = stack self.tunnel_acls = tunnel_acls self.acl_manager = acl_manager self.towards_root_ports = None self.chosen_towards_ports = None self.chosen_towards_port = None self.away_ports = None self.inactive_away_ports = None self.pruned_away_ports = None self.reset_peer_distances()<|docstring|>Initialize variables and set up peer distances Args: stack (Stack): Stack object of the DP on the Valve being managed<|endoftext|>
86363f8d62849e9f6cade3cf70ca962b8ca1758bba37078115c142ebe1ca32ae
@staticmethod def stacked_valves(valves): 'Return set of valves that have stacking enabled' return {valve for valve in valves if (valve.dp.stack and valve.dp.stack.root_name)}
Return set of valves that have stacking enabled
faucet/valve_stack.py
stacked_valves
pbatta/faucet
0
python
@staticmethod def stacked_valves(valves): return {valve for valve in valves if (valve.dp.stack and valve.dp.stack.root_name)}
@staticmethod def stacked_valves(valves): return {valve for valve in valves if (valve.dp.stack and valve.dp.stack.root_name)}<|docstring|>Return set of valves that have stacking enabled<|endoftext|>
5a71704ae4ec528e9584cb1020b0b8b8356de5202172c0eda6d9fe514db422b5
def reset_peer_distances(self): 'Recalculates the towards and away ports for this node' self.towards_root_ports = set() self.chosen_towards_ports = set() self.chosen_towards_port = None self.away_ports = set() self.inactive_away_ports = set() self.pruned_away_ports = set() all_peer_ports = set(self.stack.canonical_up_ports()) if self.stack.is_root(): self.away_ports = all_peer_ports else: port_peer_distances = {port: len(port.stack['dp'].stack.shortest_path_to_root()) for port in all_peer_ports} shortest_peer_distance = None for (port, port_peer_distance) in port_peer_distances.items(): if (shortest_peer_distance is None): shortest_peer_distance = port_peer_distance continue shortest_peer_distance = min(shortest_peer_distance, port_peer_distance) self.towards_root_ports = {port for (port, port_peer_distance) in port_peer_distances.items() if (port_peer_distance == shortest_peer_distance)} self.away_ports = (all_peer_ports - self.towards_root_ports) if self.towards_root_ports: shortest_path = self.stack.shortest_path_to_root() if (shortest_path and (len(shortest_path) > 1)): first_peer_dp = shortest_path[1] else: first_peer_port = self.stack.canonical_port_order(self.towards_root_ports)[0] first_peer_dp = first_peer_port.stack['dp'].name self.chosen_towards_ports = {port for port in self.towards_root_ports if (port.stack['dp'].name == first_peer_dp)} if self.chosen_towards_ports: self.chosen_towards_port = self.stack.canonical_up_ports(self.chosen_towards_ports)[0] self.away_ports = (all_peer_ports - self.towards_root_ports) if self.away_ports: self.inactive_away_ports = {port for port in self.away_ports if (not self.stack.is_in_path(port.stack['dp'].name, self.stack.root_name))} ports_by_dp = defaultdict(list) for port in self.away_ports: ports_by_dp[port.stack['dp']].append(port) for ports in ports_by_dp.values(): remote_away_ports = self.stack.canonical_up_ports([port.stack['port'] for port in ports]) self.pruned_away_ports.update([port.stack['port'] for port in remote_away_ports if (port != remote_away_ports[0])]) return self.chosen_towards_ports
Recalculates the towards and away ports for this node
faucet/valve_stack.py
reset_peer_distances
pbatta/faucet
0
python
def reset_peer_distances(self): self.towards_root_ports = set() self.chosen_towards_ports = set() self.chosen_towards_port = None self.away_ports = set() self.inactive_away_ports = set() self.pruned_away_ports = set() all_peer_ports = set(self.stack.canonical_up_ports()) if self.stack.is_root(): self.away_ports = all_peer_ports else: port_peer_distances = {port: len(port.stack['dp'].stack.shortest_path_to_root()) for port in all_peer_ports} shortest_peer_distance = None for (port, port_peer_distance) in port_peer_distances.items(): if (shortest_peer_distance is None): shortest_peer_distance = port_peer_distance continue shortest_peer_distance = min(shortest_peer_distance, port_peer_distance) self.towards_root_ports = {port for (port, port_peer_distance) in port_peer_distances.items() if (port_peer_distance == shortest_peer_distance)} self.away_ports = (all_peer_ports - self.towards_root_ports) if self.towards_root_ports: shortest_path = self.stack.shortest_path_to_root() if (shortest_path and (len(shortest_path) > 1)): first_peer_dp = shortest_path[1] else: first_peer_port = self.stack.canonical_port_order(self.towards_root_ports)[0] first_peer_dp = first_peer_port.stack['dp'].name self.chosen_towards_ports = {port for port in self.towards_root_ports if (port.stack['dp'].name == first_peer_dp)} if self.chosen_towards_ports: self.chosen_towards_port = self.stack.canonical_up_ports(self.chosen_towards_ports)[0] self.away_ports = (all_peer_ports - self.towards_root_ports) if self.away_ports: self.inactive_away_ports = {port for port in self.away_ports if (not self.stack.is_in_path(port.stack['dp'].name, self.stack.root_name))} ports_by_dp = defaultdict(list) for port in self.away_ports: ports_by_dp[port.stack['dp']].append(port) for ports in ports_by_dp.values(): remote_away_ports = self.stack.canonical_up_ports([port.stack['port'] for port in ports]) self.pruned_away_ports.update([port.stack['port'] for port in remote_away_ports if (port != remote_away_ports[0])]) return self.chosen_towards_ports
def reset_peer_distances(self): self.towards_root_ports = set() self.chosen_towards_ports = set() self.chosen_towards_port = None self.away_ports = set() self.inactive_away_ports = set() self.pruned_away_ports = set() all_peer_ports = set(self.stack.canonical_up_ports()) if self.stack.is_root(): self.away_ports = all_peer_ports else: port_peer_distances = {port: len(port.stack['dp'].stack.shortest_path_to_root()) for port in all_peer_ports} shortest_peer_distance = None for (port, port_peer_distance) in port_peer_distances.items(): if (shortest_peer_distance is None): shortest_peer_distance = port_peer_distance continue shortest_peer_distance = min(shortest_peer_distance, port_peer_distance) self.towards_root_ports = {port for (port, port_peer_distance) in port_peer_distances.items() if (port_peer_distance == shortest_peer_distance)} self.away_ports = (all_peer_ports - self.towards_root_ports) if self.towards_root_ports: shortest_path = self.stack.shortest_path_to_root() if (shortest_path and (len(shortest_path) > 1)): first_peer_dp = shortest_path[1] else: first_peer_port = self.stack.canonical_port_order(self.towards_root_ports)[0] first_peer_dp = first_peer_port.stack['dp'].name self.chosen_towards_ports = {port for port in self.towards_root_ports if (port.stack['dp'].name == first_peer_dp)} if self.chosen_towards_ports: self.chosen_towards_port = self.stack.canonical_up_ports(self.chosen_towards_ports)[0] self.away_ports = (all_peer_ports - self.towards_root_ports) if self.away_ports: self.inactive_away_ports = {port for port in self.away_ports if (not self.stack.is_in_path(port.stack['dp'].name, self.stack.root_name))} ports_by_dp = defaultdict(list) for port in self.away_ports: ports_by_dp[port.stack['dp']].append(port) for ports in ports_by_dp.values(): remote_away_ports = self.stack.canonical_up_ports([port.stack['port'] for port in ports]) self.pruned_away_ports.update([port.stack['port'] for port in remote_away_ports if (port != remote_away_ports[0])]) return self.chosen_towards_ports<|docstring|>Recalculates the towards and away ports for this node<|endoftext|>
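The core of reset_peer_distances is a min-distance split over up ports: peers whose distance to the stack root is minimal become towards-root ports, everything else goes into the away set. A toy version with plain dicts (the switch names and distances are made up):

peer_distance = {'port_to_sw1': 1, 'port_to_sw2': 1, 'port_to_sw3': 3}
shortest = min(peer_distance.values())
towards_root = {p for p, d in peer_distance.items() if d == shortest}
away = set(peer_distance) - towards_root
print(towards_root, away)   # ports on the shortest path vs. the rest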
5feeaaee91e9d578d054584062b56aeccee65cb80a1cfa3d02dac0ac50b340cc
def update_stack_topo(self, event, dp, port): '\n Update the stack topo according to the event.\n\n Args:\n event (bool): True if the port is UP\n dp (DP): DP object\n port (Port): The port being brought UP/DOWN\n ' self.stack.modify_link(dp, port, event) towards_ports = self.reset_peer_distances() if towards_ports: self.logger.info(('shortest path to root is via %s' % towards_ports)) else: self.logger.info('no path available to root')
Update the stack topo according to the event. Args: event (bool): True if the port is UP dp (DP): DP object port (Port): The port being brought UP/DOWN
faucet/valve_stack.py
update_stack_topo
pbatta/faucet
0
python
def update_stack_topo(self, event, dp, port): '\n Update the stack topo according to the event.\n\n Args:\n event (bool): True if the port is UP\n dp (DP): DP object\n port (Port): The port being brought UP/DOWN\n ' self.stack.modify_link(dp, port, event) towards_ports = self.reset_peer_distances() if towards_ports: self.logger.info(('shortest path to root is via %s' % towards_ports)) else: self.logger.info('no path available to root')
def update_stack_topo(self, event, dp, port): '\n Update the stack topo according to the event.\n\n Args:\n event (bool): True if the port is UP\n dp (DP): DP object\n port (Port): The port being brought UP/DOWN\n ' self.stack.modify_link(dp, port, event) towards_ports = self.reset_peer_distances() if towards_ports: self.logger.info(('shortest path to root is via %s' % towards_ports)) else: self.logger.info('no path available to root')<|docstring|>Update the stack topo according to the event. Args: event (bool): True if the port is UP dp (DP): DP object port (Port): The port being brought UP/DOWN<|endoftext|>
da48ec68c6042eb472c1394edc2f14cbc423923bf8a7f68a34db698df7ff0db7
def default_port_towards(self, dp_name): '\n Default shortest path towards the provided destination, via direct shortest path\n\n Args:\n dp_name (str): Destination DP\n Returns:\n Port: port from current node that is shortest directly towards destination\n ' return self.stack.shortest_path_port(dp_name)
Default shortest path towards the provided destination, via direct shortest path Args: dp_name (str): Destination DP Returns: Port: port from current node that is shortest directly towards destination
faucet/valve_stack.py
default_port_towards
pbatta/faucet
0
python
def default_port_towards(self, dp_name): '\n Default shortest path towards the provided destination, via direct shortest path\n\n Args:\n dp_name (str): Destination DP\n Returns:\n Port: port from current node that is shortest directly towards destination\n ' return self.stack.shortest_path_port(dp_name)
def default_port_towards(self, dp_name): '\n Default shortest path towards the provided destination, via direct shortest path\n\n Args:\n dp_name (str): Destination DP\n Returns:\n Port: port from current node that is shortest directly towards destination\n ' return self.stack.shortest_path_port(dp_name)<|docstring|>Default shortest path towards the provided destination, via direct shortest path Args: dp_name (str): Destination DP Returns: Port: port from current node that is shortest directly towards destination<|endoftext|>
edad9200974f5daf9faa6ad6d35b7e25882633145cf76d044421fe87e4035094
def relative_port_towards(self, dp_name): '\n Returns the shortest path towards the provided destination, via either the root or away paths\n\n Args:\n dp_name (str): Destination DP\n Returns:\n Port: port from current node that is towards/away the destination DP depending on\n relative position of the current node\n ' if (not self.stack.shortest_path_to_root()): return self.default_port_towards(dp_name) if (self.stack.name == dp_name): return self.default_port_towards(dp_name) path_to_root = self.stack.shortest_path_to_root(dp_name) if (path_to_root and (self.stack.name in path_to_root)): away_dp = path_to_root[(path_to_root.index(self.stack.name) - 1)] for port in self.away_ports: if ((port.stack['dp'].name == away_dp) and (not self.is_pruned_port(port))): return port return None return self.chosen_towards_port
Returns the shortest path towards the provided destination, via either the root or away paths Args: dp_name (str): Destination DP Returns: Port: port from current node that is towards/away the destination DP depending on relative position of the current node
faucet/valve_stack.py
relative_port_towards
pbatta/faucet
0
python
def relative_port_towards(self, dp_name): '\n Returns the shortest path towards the provided destination, via either the root or away paths\n\n Args:\n dp_name (str): Destination DP\n Returns:\n Port: port from current node that is towards/away the destination DP depending on\n relative position of the current node\n ' if (not self.stack.shortest_path_to_root()): return self.default_port_towards(dp_name) if (self.stack.name == dp_name): return self.default_port_towards(dp_name) path_to_root = self.stack.shortest_path_to_root(dp_name) if (path_to_root and (self.stack.name in path_to_root)): away_dp = path_to_root[(path_to_root.index(self.stack.name) - 1)] for port in self.away_ports: if ((port.stack['dp'].name == away_dp) and (not self.is_pruned_port(port))): return port return None return self.chosen_towards_port
def relative_port_towards(self, dp_name): '\n Returns the shortest path towards the provided destination, via either the root or away paths\n\n Args:\n dp_name (str): Destination DP\n Returns:\n Port: port from current node that is towards/away the destination DP depending on\n relative position of the current node\n ' if (not self.stack.shortest_path_to_root()): return self.default_port_towards(dp_name) if (self.stack.name == dp_name): return self.default_port_towards(dp_name) path_to_root = self.stack.shortest_path_to_root(dp_name) if (path_to_root and (self.stack.name in path_to_root)): away_dp = path_to_root[(path_to_root.index(self.stack.name) - 1)] for port in self.away_ports: if ((port.stack['dp'].name == away_dp) and (not self.is_pruned_port(port))): return port return None return self.chosen_towards_port<|docstring|>Returns the shortest path towards the provided destination, via either the root or away paths Args: dp_name (str): Destination DP Returns: Port: port from current node that is towards/away the destination DP depending on relative position of the current node<|endoftext|>
48f8d3f31acae2455d48cf808c86eea3338f359b8e2ca731ac0dc3eba50d70d2
def edge_learn_port_towards(self, pkt_meta, edge_dp): '\n Returns the port towards the edge DP\n\n Args:\n pkt_meta (PacketMeta): Packet on the edge DP\n edge_dp (DP): Edge DP that received the packet\n Returns:\n Port: Port towards the edge DP via some stack chosen metric\n ' if pkt_meta.vlan.edge_learn_stack_root: return self.relative_port_towards(edge_dp.name) return self.default_port_towards(edge_dp.name)
Returns the port towards the edge DP Args: pkt_meta (PacketMeta): Packet on the edge DP edge_dp (DP): Edge DP that received the packet Returns: Port: Port towards the edge DP via some stack chosen metric
faucet/valve_stack.py
edge_learn_port_towards
pbatta/faucet
0
python
def edge_learn_port_towards(self, pkt_meta, edge_dp): '\n Returns the port towards the edge DP\n\n Args:\n pkt_meta (PacketMeta): Packet on the edge DP\n edge_dp (DP): Edge DP that received the packet\n Returns:\n Port: Port towards the edge DP via some stack chosen metric\n ' if pkt_meta.vlan.edge_learn_stack_root: return self.relative_port_towards(edge_dp.name) return self.default_port_towards(edge_dp.name)
def edge_learn_port_towards(self, pkt_meta, edge_dp): '\n Returns the port towards the edge DP\n\n Args:\n pkt_meta (PacketMeta): Packet on the edge DP\n edge_dp (DP): Edge DP that received the packet\n Returns:\n Port: Port towards the edge DP via some stack chosen metric\n ' if pkt_meta.vlan.edge_learn_stack_root: return self.relative_port_towards(edge_dp.name) return self.default_port_towards(edge_dp.name)<|docstring|>Returns the port towards the edge DP Args: pkt_meta (PacketMeta): Packet on the edge DP edge_dp (DP): Edge DP that received the packet Returns: Port: Port towards the edge DP via some stack chosen metric<|endoftext|>
405d69265e8394cbf3f33bb9c3c8841f72b6f82abcb00942dd9666d89e23b9aa
def tunnel_outport(self, src_dp, dst_dp, dst_port): '\n Returns the output port for the current stack node for the tunnel path\n\n Args:\n src_dp (str): Source DP name of the tunnel\n dst_dp (str): Destination DP name of the tunnel\n dst_port (int): Destination port of the tunnel\n Returns:\n int: Output port number for the current node of the tunnel\n ' if (not self.stack.is_in_path(src_dp, dst_dp)): return None out_port = self.default_port_towards(dst_dp) if (self.stack.name == dst_dp): out_port = dst_port elif out_port: out_port = out_port.number return out_port
Returns the output port for the current stack node for the tunnel path Args: src_dp (str): Source DP name of the tunnel dst_dp (str): Destination DP name of the tunnel dst_port (int): Destination port of the tunnel Returns: int: Output port number for the current node of the tunnel
faucet/valve_stack.py
tunnel_outport
pbatta/faucet
0
python
def tunnel_outport(self, src_dp, dst_dp, dst_port): '\n Returns the output port for the current stack node for the tunnel path\n\n Args:\n src_dp (str): Source DP name of the tunnel\n dst_dp (str): Destination DP name of the tunnel\n dst_port (int): Destination port of the tunnel\n Returns:\n int: Output port number for the current node of the tunnel\n ' if (not self.stack.is_in_path(src_dp, dst_dp)): return None out_port = self.default_port_towards(dst_dp) if (self.stack.name == dst_dp): out_port = dst_port elif out_port: out_port = out_port.number return out_port
def tunnel_outport(self, src_dp, dst_dp, dst_port): '\n Returns the output port for the current stack node for the tunnel path\n\n Args:\n src_dp (str): Source DP name of the tunnel\n dst_dp (str): Destination DP name of the tunnel\n dst_port (int): Destination port of the tunnel\n Returns:\n int: Output port number for the current node of the tunnel\n ' if (not self.stack.is_in_path(src_dp, dst_dp)): return None out_port = self.default_port_towards(dst_dp) if (self.stack.name == dst_dp): out_port = dst_port elif out_port: out_port = out_port.number return out_port<|docstring|>Returns the output port for the current stack node for the tunnel path Args: src_dp (str): Source DP name of the tunnel dst_dp (str): Destination DP name of the tunnel dst_port (int): Destination port of the tunnel Returns: int: Output port number for the current node of the tunnel<|endoftext|>
bfe6bac19d6f7a2890079b85cf895202626a76cfe7cc6d0b825e92e5c0f88c3b
def update_health(self, now, last_live_times, update_time): '\n Returns whether the current stack node is healthy; a healthy stack node\n is one that attempted to connect recently, or was known to be running\n recently, has all LAGs UP and any stack port UP\n\n Args:\n now (float): Current time\n last_live_times (dict): Last live time value for each DP\n update_time (int): Stack root update interval time\n Returns:\n bool: True if current stack node is healthy\n ' prev_health = self.stack.dyn_healthy (new_health, reason) = self.stack.update_health(now, last_live_times, update_time, self.dp.lacp_down_ports(), self.stack.down_ports()) if (prev_health != new_health): health = ('HEALTHY' if new_health else 'UNHEALTHY') self.logger.info(('Stack node %s %s (%s)' % (self.stack.name, health, reason))) return new_health
Returns whether the current stack node is healthy; a healthy stack node is one that attempted to connect recently, or was known to be running recently, has all LAGs UP and any stack port UP Args: now (float): Current time last_live_times (dict): Last live time value for each DP update_time (int): Stack root update interval time Returns: bool: True if current stack node is healthy
faucet/valve_stack.py
update_health
pbatta/faucet
0
python
def update_health(self, now, last_live_times, update_time): '\n Returns whether the current stack node is healthy; a healthy stack node\n is one that attempted to connect recently, or was known to be running\n recently, has all LAGs UP and any stack port UP\n\n Args:\n now (float): Current time\n last_live_times (dict): Last live time value for each DP\n update_time (int): Stack root update interval time\n Returns:\n bool: True if current stack node is healthy\n ' prev_health = self.stack.dyn_healthy (new_health, reason) = self.stack.update_health(now, last_live_times, update_time, self.dp.lacp_down_ports(), self.stack.down_ports()) if (prev_health != new_health): health = ('HEALTHY' if new_health else 'UNHEALTHY') self.logger.info(('Stack node %s %s (%s)' % (self.stack.name, health, reason))) return new_health
def update_health(self, now, last_live_times, update_time): '\n Returns whether the current stack node is healthy; a healthy stack node\n is one that attempted to connect recently, or was known to be running\n recently, has all LAGs UP and any stack port UP\n\n Args:\n now (float): Current time\n last_live_times (dict): Last live time value for each DP\n update_time (int): Stack root update interval time\n Returns:\n bool: True if current stack node is healthy\n ' prev_health = self.stack.dyn_healthy (new_health, reason) = self.stack.update_health(now, last_live_times, update_time, self.dp.lacp_down_ports(), self.stack.down_ports()) if (prev_health != new_health): health = ('HEALTHY' if new_health else 'UNHEALTHY') self.logger.info(('Stack node %s %s (%s)' % (self.stack.name, health, reason))) return new_health<|docstring|>Returns whether the current stack node is healthy; a healthy stack node is one that attempted to connect recently, or was known to be running recently, has all LAGs UP and any stack port UP Args: now (float): Current time last_live_times (dict): Last live time value for each DP update_time (int): Stack root update interval time Returns: bool: True if current stack node is healthy<|endoftext|>
b0a91da82ce570c5b44ceeb6ee3dac9d3f51b7787c3c782d251582316a587217
def consistent_roots(self, expected_root_name, valve, other_valves): 'Returns true if all the stack nodes have the root configured correctly' stacked_valves = {valve}.union(self.stacked_valves(other_valves)) for stack_valve in stacked_valves: if (stack_valve.dp.stack.root_name != expected_root_name): return False return True
Returns true if all the stack nodes have the root configured correctly
faucet/valve_stack.py
consistent_roots
pbatta/faucet
0
python
def consistent_roots(self, expected_root_name, valve, other_valves): stacked_valves = {valve}.union(self.stacked_valves(other_valves)) for stack_valve in stacked_valves: if (stack_valve.dp.stack.root_name != expected_root_name): return False return True
def consistent_roots(self, expected_root_name, valve, other_valves): stacked_valves = {valve}.union(self.stacked_valves(other_valves)) for stack_valve in stacked_valves: if (stack_valve.dp.stack.root_name != expected_root_name): return False return True<|docstring|>Returns true if all the stack nodes have the root configured correctly<|endoftext|>
5cbcc56e6a90f19fbaa3c95950d861c00ff22e06478e4c802f94714c1ff09ed8
def nominate_stack_root(self, root_valve, other_valves, now, last_live_times, update_time): '\n Nominate a new stack root\n\n Args:\n root_valve (Valve): Previous/current root Valve object\n other_valves (list): List of other valves (not including previous root)\n now (float): Current time\n last_live_times (dict): Last live time value for each DP\n update_time (int): Stack root update interval time\n Returns:\n str: Name of the new elected stack root\n ' stack_valves = {valve for valve in other_valves if valve.dp.stack} if root_valve: stack_valves = {root_valve}.union(stack_valves) healthy_valves = [] unhealthy_valves = [] for valve in stack_valves: if valve.dp.stack.is_root_candidate(): healthy = valve.stack_manager.update_health(now, last_live_times, update_time) if healthy: healthy_valves.append(valve) else: unhealthy_valves.append(valve) if ((not healthy_valves) and (not unhealthy_valves)): return None if healthy_valves: new_root_name = None if root_valve: new_root_name = root_valve.dp.name if (root_valve not in healthy_valves): stacks = [valve.dp.stack for valve in healthy_valves] (_, new_root_name) = stacks[0].nominate_stack_root(stacks) else: stacks = [valve.dp.stack for valve in unhealthy_valves] (_, new_root_name) = stacks[0].nominate_stack_root(stacks) return new_root_name
Nominate a new stack root Args: root_valve (Valve): Previous/current root Valve object other_valves (list): List of other valves (not including previous root) now (float): Current time last_live_times (dict): Last live time value for each DP update_time (int): Stack root update interval time Returns: str: Name of the new elected stack root
faucet/valve_stack.py
nominate_stack_root
pbatta/faucet
0
python
def nominate_stack_root(self, root_valve, other_valves, now, last_live_times, update_time): '\n Nominate a new stack root\n\n Args:\n root_valve (Valve): Previous/current root Valve object\n other_valves (list): List of other valves (not including previous root)\n now (float): Current time\n last_live_times (dict): Last live time value for each DP\n update_time (int): Stack root update interval time\n Returns:\n str: Name of the new elected stack root\n ' stack_valves = {valve for valve in other_valves if valve.dp.stack} if root_valve: stack_valves = {root_valve}.union(stack_valves) healthy_valves = [] unhealthy_valves = [] for valve in stack_valves: if valve.dp.stack.is_root_candidate(): healthy = valve.stack_manager.update_health(now, last_live_times, update_time) if healthy: healthy_valves.append(valve) else: unhealthy_valves.append(valve) if ((not healthy_valves) and (not unhealthy_valves)): return None if healthy_valves: new_root_name = None if root_valve: new_root_name = root_valve.dp.name if (root_valve not in healthy_valves): stacks = [valve.dp.stack for valve in healthy_valves] (_, new_root_name) = stacks[0].nominate_stack_root(stacks) else: stacks = [valve.dp.stack for valve in unhealthy_valves] (_, new_root_name) = stacks[0].nominate_stack_root(stacks) return new_root_name
def nominate_stack_root(self, root_valve, other_valves, now, last_live_times, update_time): '\n Nominate a new stack root\n\n Args:\n root_valve (Valve): Previous/current root Valve object\n other_valves (list): List of other valves (not including previous root)\n now (float): Current time\n last_live_times (dict): Last live time value for each DP\n update_time (int): Stack root update interval time\n Returns:\n str: Name of the new elected stack root\n ' stack_valves = {valve for valve in other_valves if valve.dp.stack} if root_valve: stack_valves = {root_valve}.union(stack_valves) healthy_valves = [] unhealthy_valves = [] for valve in stack_valves: if valve.dp.stack.is_root_candidate(): healthy = valve.stack_manager.update_health(now, last_live_times, update_time) if healthy: healthy_valves.append(valve) else: unhealthy_valves.append(valve) if ((not healthy_valves) and (not unhealthy_valves)): return None if healthy_valves: new_root_name = None if root_valve: new_root_name = root_valve.dp.name if (root_valve not in healthy_valves): stacks = [valve.dp.stack for valve in healthy_valves] (_, new_root_name) = stacks[0].nominate_stack_root(stacks) else: stacks = [valve.dp.stack for valve in unhealthy_valves] (_, new_root_name) = stacks[0].nominate_stack_root(stacks) return new_root_name<|docstring|>Nominate a new stack root Args: root_valve (Valve): Previous/current root Valve object other_valves (list): List of other valves (not including previous root) now (float): Current time last_live_times (dict): Last live time value for each DP update_time (int): Stack root update interval time Returns: str: Name of the new elected stack root<|endoftext|>
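The election order above is: keep the previous root while it remains healthy, otherwise nominate among healthy candidates, and only fall back to unhealthy candidates when no healthy one exists. A hypothetical distillation follows, with a deterministic stand-in for the tie-break that the source delegates to Stack.nominate_stack_root:

    def elect_root(prev_root, healthy, unhealthy):
        # Assumed simplification: candidates are plain names and the
        # nomination tie-break is lexicographic (min); the real code
        # encapsulates it in the Stack class.
        if not healthy and not unhealthy:
            return None
        if healthy:
            if prev_root in healthy:
                return prev_root          # a healthy incumbent keeps the role
            return min(healthy)
        return min(unhealthy)

    assert elect_root('sw1', {'sw1', 'sw2'}, set()) == 'sw1'   # incumbent stays
    assert elect_root('sw1', {'sw2'}, set()) == 'sw2'          # failover to a healthy peer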
b6235c6c2e1f8dcf44e7a58cb43fc355cf2eabe78ef016d0bd8b88a130f923d6
def stack_ports(self): 'Yield the stack ports of this stack node' for port in self.stack.ports: (yield port)
Yield the stack ports of this stack node
faucet/valve_stack.py
stack_ports
pbatta/faucet
0
python
def stack_ports(self): for port in self.stack.ports: (yield port)
def stack_ports(self): for port in self.stack.ports: (yield port)<|docstring|>Yield the stack ports of this stack node<|endoftext|>
55394c949396a3380f24059b1d99539baf27fd42f5140f0ed799e0d0941bec66
def is_stack_port(self, port): 'Return whether the port is a stack port' return bool(port.stack)
Return whether the port is a stack port
faucet/valve_stack.py
is_stack_port
pbatta/faucet
0
python
def is_stack_port(self, port): return bool(port.stack)
def is_stack_port(self, port): return bool(port.stack)<|docstring|>Return whether the port is a stack port<|endoftext|>
8a5b9a3058a4e11357f248e8291c20a92b8811439be824bfb48df2b3968d5ae9
def is_away(self, port): 'Return whether the port is an away port for the node' return (port in self.away_ports)
Return whether the port is an away port for the node
faucet/valve_stack.py
is_away
pbatta/faucet
0
python
def is_away(self, port): return (port in self.away_ports)
def is_away(self, port): return (port in self.away_ports)<|docstring|>Return whether the port is an away port for the node<|endoftext|>
f737913c00d6e6616edf58957bb0232e9756739a2ab6c39b1fb209c66a42ca78
def is_towards_root(self, port): 'Return whether the port is a port towards the root for the node' return (port in self.towards_root_ports)
Return whether the port is a port towards the root for the node
faucet/valve_stack.py
is_towards_root
pbatta/faucet
0
python
def is_towards_root(self, port): return (port in self.towards_root_ports)
def is_towards_root(self, port): return (port in self.towards_root_ports)<|docstring|>Return whether the port is a port towards the root for the node<|endoftext|>
70dfc60cda6adbe23aafb141d4cef42b348f810ac6c315d16a881ce1508999e5
def is_selected_towards_root_port(self, port): 'Return true if the port is the chosen towards root port' return (port == self.chosen_towards_port)
Return true if the port is the chosen towards root port
faucet/valve_stack.py
is_selected_towards_root_port
pbatta/faucet
0
python
def is_selected_towards_root_port(self, port): return (port == self.chosen_towards_port)
def is_selected_towards_root_port(self, port): return (port == self.chosen_towards_port)<|docstring|>Return true if the port is the chosen towards root port<|endoftext|>
bf9a24f96f975e3f4dac61561511e6529ad95b2692d5d0e23118520f8b25b326
def is_pruned_port(self, port): 'Return true if the port is to be pruned' if self.is_towards_root(port): return (not self.is_selected_towards_root_port(port)) if self.is_away(port): if self.pruned_away_ports: return (port in self.pruned_away_ports) return False return True
Return true if the port is to be pruned
faucet/valve_stack.py
is_pruned_port
pbatta/faucet
0
python
def is_pruned_port(self, port): if self.is_towards_root(port): return (not self.is_selected_towards_root_port(port)) if self.is_away(port): if self.pruned_away_ports: return (port in self.pruned_away_ports) return False return True
def is_pruned_port(self, port): if self.is_towards_root(port): return (not self.is_selected_towards_root_port(port)) if self.is_away(port): if self.pruned_away_ports: return (port in self.pruned_away_ports) return False return True<|docstring|>Return true if the port is to be pruned<|endoftext|>
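Note the fall-through: a port that is neither towards-root nor away reports as pruned. A standalone restatement of the decision with illustrative names (not the faucet API):

    def is_pruned(port, towards_root, chosen, away, pruned_away):
        if port in towards_root:
            return port != chosen                          # only the chosen towards-root port stays open
        if port in away:
            return (port in pruned_away) if pruned_away else False
        return True                                        # any remaining port defaults to pruned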
b3ce9b029eeb261491f9062abc74cfa191759083dbbf69dd7a7aa2495f032be7
def adjacent_stack_ports(self, peer_dp): 'Return list of ports that connect to an adjacent DP' return [port for port in self.stack.ports if (port.stack['dp'] == peer_dp)]
Return list of ports that connect to an adjacent DP
faucet/valve_stack.py
adjacent_stack_ports
pbatta/faucet
0
python
def adjacent_stack_ports(self, peer_dp): return [port for port in self.stack.ports if (port.stack['dp'] == peer_dp)]
def adjacent_stack_ports(self, peer_dp): return [port for port in self.stack.ports if (port.stack['dp'] == peer_dp)]<|docstring|>Return list of ports that connect to an adjacent DP<|endoftext|>
2ca7fdee2d2c1c1d689756a2126a949fdb2ae39e747b3f56972e8246f8378496
def acl_update_tunnel(self, acl): 'Return ofmsgs for all tunnels in an ACL with a tunnel rule' ofmsgs = [] source_vids = defaultdict(list) for (_id, tunnel_dest) in acl.tunnel_dests.items(): (dst_dp, dst_port) = (tunnel_dest['dst_dp'], tunnel_dest['dst_port']) updated_sources = [] for (source_id, source) in acl.tunnel_sources.items(): src_dp = source['dp'] out_port = self.tunnel_outport(src_dp, dst_dp, dst_port) if out_port: updated = acl.update_source_tunnel_rules(self.stack.name, source_id, _id, out_port) if updated: if (self.stack.name == src_dp): source_vids[source_id].append(_id) else: updated_sources.append(source_id) for source_id in updated_sources: ofmsgs.extend(self.acl_manager.build_tunnel_rules_ofmsgs(source_id, _id, acl)) for (source_id, vids) in source_vids.items(): for vid in vids: ofmsgs.extend(self.acl_manager.build_tunnel_acl_rule_ofmsgs(source_id, vid, acl)) return ofmsgs
Return ofmsgs for all tunnels in an ACL with a tunnel rule
faucet/valve_stack.py
acl_update_tunnel
pbatta/faucet
0
python
def acl_update_tunnel(self, acl): ofmsgs = [] source_vids = defaultdict(list) for (_id, tunnel_dest) in acl.tunnel_dests.items(): (dst_dp, dst_port) = (tunnel_dest['dst_dp'], tunnel_dest['dst_port']) updated_sources = [] for (source_id, source) in acl.tunnel_sources.items(): src_dp = source['dp'] out_port = self.tunnel_outport(src_dp, dst_dp, dst_port) if out_port: updated = acl.update_source_tunnel_rules(self.stack.name, source_id, _id, out_port) if updated: if (self.stack.name == src_dp): source_vids[source_id].append(_id) else: updated_sources.append(source_id) for source_id in updated_sources: ofmsgs.extend(self.acl_manager.build_tunnel_rules_ofmsgs(source_id, _id, acl)) for (source_id, vids) in source_vids.items(): for vid in vids: ofmsgs.extend(self.acl_manager.build_tunnel_acl_rule_ofmsgs(source_id, vid, acl)) return ofmsgs
def acl_update_tunnel(self, acl): ofmsgs = [] source_vids = defaultdict(list) for (_id, tunnel_dest) in acl.tunnel_dests.items(): (dst_dp, dst_port) = (tunnel_dest['dst_dp'], tunnel_dest['dst_port']) updated_sources = [] for (source_id, source) in acl.tunnel_sources.items(): src_dp = source['dp'] out_port = self.tunnel_outport(src_dp, dst_dp, dst_port) if out_port: updated = acl.update_source_tunnel_rules(self.stack.name, source_id, _id, out_port) if updated: if (self.stack.name == src_dp): source_vids[source_id].append(_id) else: updated_sources.append(source_id) for source_id in updated_sources: ofmsgs.extend(self.acl_manager.build_tunnel_rules_ofmsgs(source_id, _id, acl)) for (source_id, vids) in source_vids.items(): for vid in vids: ofmsgs.extend(self.acl_manager.build_tunnel_acl_rule_ofmsgs(source_id, vid, acl)) return ofmsgs<|docstring|>Return ofmsgs for all tunnels in an ACL with a tunnel rule<|endoftext|>
8821ca592f967fd9602dc533b63ba8c3a3ebaffc485521565a85800b7aeafb4f
def add_tunnel_acls(self): 'Returns ofmsgs installing the tunnel path rules' ofmsgs = [] if self.tunnel_acls: for acl in self.tunnel_acls: ofmsgs.extend(self.acl_update_tunnel(acl)) return ofmsgs
Returns ofmsgs installing the tunnel path rules
faucet/valve_stack.py
add_tunnel_acls
pbatta/faucet
0
python
def add_tunnel_acls(self): ofmsgs = [] if self.tunnel_acls: for acl in self.tunnel_acls: ofmsgs.extend(self.acl_update_tunnel(acl)) return ofmsgs
def add_tunnel_acls(self): ofmsgs = [] if self.tunnel_acls: for acl in self.tunnel_acls: ofmsgs.extend(self.acl_update_tunnel(acl)) return ofmsgs<|docstring|>Returns ofmsgs installing the tunnel path rules<|endoftext|>
c978c7595a43b9ad686cd0b92c315c03e6a338869eb230ecaea6d1d715401aca
def categorical_error(pred, label): '\n Compute categorical error given score vectors and labels as\n numpy.ndarray.\n ' pred_label = pred.argmax(1) return (pred_label != label.flat).mean()
Compute categorical error given score vectors and labels as numpy.ndarray.
mnist-collection/classification_bnn.py
categorical_error
saulocatharino/nnabla-examples
228
python
def categorical_error(pred, label): '\n Compute categorical error given score vectors and labels as\n numpy.ndarray.\n ' pred_label = pred.argmax(1) return (pred_label != label.flat).mean()
def categorical_error(pred, label): '\n Compute categorical error given score vectors and labels as\n numpy.ndarray.\n ' pred_label = pred.argmax(1) return (pred_label != label.flat).mean()<|docstring|>Compute categorical error given score vectors and labels as numpy.ndarray.<|endoftext|>
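A quick sanity check of the helper (assuming it is in scope), with two of three argmax predictions matching their labels:

    import numpy as np

    pred = np.array([[0.1, 0.9],    # argmax -> 1 (correct)
                     [0.8, 0.2],    # argmax -> 0 (correct)
                     [0.3, 0.7]])   # argmax -> 1 (wrong)
    label = np.array([[1], [0], [0]])
    print(categorical_error(pred, label))  # 0.3333...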
d5c90b569f2c84757cb12a1acb9d50b12328ddda344d00c9a3dff3541d6945d0
def mnist_binary_connect_lenet_prediction(image, test=False): '\n Construct LeNet for MNIST (BinaryNet version).\n ' with nn.parameter_scope('conv1'): c1 = PF.binary_connect_convolution(image, 16, (5, 5)) c1 = PF.batch_normalization(c1, batch_stat=(not test)) c1 = F.elu(F.average_pooling(c1, (2, 2))) with nn.parameter_scope('conv2'): c2 = PF.binary_connect_convolution(c1, 16, (5, 5)) c2 = PF.batch_normalization(c2, batch_stat=(not test)) c2 = F.elu(F.average_pooling(c2, (2, 2))) with nn.parameter_scope('fc3'): c3 = PF.binary_connect_affine(c2, 50) c3 = PF.batch_normalization(c3, batch_stat=(not test)) c3 = F.elu(c3) with nn.parameter_scope('fc4'): c4 = PF.binary_connect_affine(c3, 10) c4 = PF.batch_normalization(c4, batch_stat=(not test)) return c4
Construct LeNet for MNIST (BinaryNet version).
mnist-collection/classification_bnn.py
mnist_binary_connect_lenet_prediction
saulocatharino/nnabla-examples
228
python
def mnist_binary_connect_lenet_prediction(image, test=False): '\n \n ' with nn.parameter_scope('conv1'): c1 = PF.binary_connect_convolution(image, 16, (5, 5)) c1 = PF.batch_normalization(c1, batch_stat=(not test)) c1 = F.elu(F.average_pooling(c1, (2, 2))) with nn.parameter_scope('conv2'): c2 = PF.binary_connect_convolution(c1, 16, (5, 5)) c2 = PF.batch_normalization(c2, batch_stat=(not test)) c2 = F.elu(F.average_pooling(c2, (2, 2))) with nn.parameter_scope('fc3'): c3 = PF.binary_connect_affine(c2, 50) c3 = PF.batch_normalization(c3, batch_stat=(not test)) c3 = F.elu(c3) with nn.parameter_scope('fc4'): c4 = PF.binary_connect_affine(c3, 10) c4 = PF.batch_normalization(c4, batch_stat=(not test)) return c4
def mnist_binary_connect_lenet_prediction(image, test=False): '\n \n ' with nn.parameter_scope('conv1'): c1 = PF.binary_connect_convolution(image, 16, (5, 5)) c1 = PF.batch_normalization(c1, batch_stat=(not test)) c1 = F.elu(F.average_pooling(c1, (2, 2))) with nn.parameter_scope('conv2'): c2 = PF.binary_connect_convolution(c1, 16, (5, 5)) c2 = PF.batch_normalization(c2, batch_stat=(not test)) c2 = F.elu(F.average_pooling(c2, (2, 2))) with nn.parameter_scope('fc3'): c3 = PF.binary_connect_affine(c2, 50) c3 = PF.batch_normalization(c3, batch_stat=(not test)) c3 = F.elu(c3) with nn.parameter_scope('fc4'): c4 = PF.binary_connect_affine(c3, 10) c4 = PF.batch_normalization(c4, batch_stat=(not test)) return c4<|docstring|>Construct LeNet for MNIST (BinaryNet version).<|endoftext|>
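A usage sketch that applies to any of the prediction builders in this file: they only assemble a computation graph, so inference needs a Variable fed with data. The batch size is hypothetical; assumes nnabla and the function above are importable (the `x / 255.0` scaling mirrors what the training script in this file does):

    import numpy as np
    import nnabla as nn

    x = nn.Variable([8, 1, 28, 28])                      # batch of 8 MNIST images
    y = mnist_binary_connect_lenet_prediction(x / 255.0, test=True)
    x.d = np.random.rand(8, 1, 28, 28) * 255             # stand-in pixel data
    y.forward()
    print(y.d.argmax(axis=1))                            # predicted classes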
903b8bd2e9fad51a954f38a2fcd5bd3d61eaa98fbba34f2225a4afeb79278a78
def mnist_binary_connect_resnet_prediction(image, test=False): '\n Construct ResNet for MNIST (BinaryNet version).\n ' def bn(x): return PF.batch_normalization(x, batch_stat=(not test)) def res_unit(x, scope): C = x.shape[1] with nn.parameter_scope(scope): with nn.parameter_scope('conv1'): h = F.elu(bn(PF.binary_connect_convolution(x, (C / 2), (1, 1), with_bias=False))) with nn.parameter_scope('conv2'): h = F.elu(bn(PF.binary_connect_convolution(h, (C / 2), (3, 3), pad=(1, 1), with_bias=False))) with nn.parameter_scope('conv3'): h = bn(PF.binary_connect_convolution(h, C, (1, 1), with_bias=False)) return F.elu((x + h)) with nn.parameter_scope('conv1'): c1 = F.elu(bn(PF.binary_connect_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False))) c2 = F.max_pooling(res_unit(c1, 'conv2'), (2, 2)) c3 = F.max_pooling(res_unit(c2, 'conv3'), (2, 2)) c4 = res_unit(c3, 'conv4') c5 = F.max_pooling(res_unit(c4, 'conv5'), (2, 2)) c6 = res_unit(c5, 'conv6') pl = F.average_pooling(c6, (4, 4)) with nn.parameter_scope('classifier'): y = bn(PF.binary_connect_affine(pl, 10)) return y
Construct ResNet for MNIST (BinaryNet version).
mnist-collection/classification_bnn.py
mnist_binary_connect_resnet_prediction
saulocatharino/nnabla-examples
228
python
def mnist_binary_connect_resnet_prediction(image, test=False): '\n \n ' def bn(x): return PF.batch_normalization(x, batch_stat=(not test)) def res_unit(x, scope): C = x.shape[1] with nn.parameter_scope(scope): with nn.parameter_scope('conv1'): h = F.elu(bn(PF.binary_connect_convolution(x, (C / 2), (1, 1), with_bias=False))) with nn.parameter_scope('conv2'): h = F.elu(bn(PF.binary_connect_convolution(h, (C / 2), (3, 3), pad=(1, 1), with_bias=False))) with nn.parameter_scope('conv3'): h = bn(PF.binary_connect_convolution(h, C, (1, 1), with_bias=False)) return F.elu((x + h)) with nn.parameter_scope('conv1'): c1 = F.elu(bn(PF.binary_connect_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False))) c2 = F.max_pooling(res_unit(c1, 'conv2'), (2, 2)) c3 = F.max_pooling(res_unit(c2, 'conv3'), (2, 2)) c4 = res_unit(c3, 'conv4') c5 = F.max_pooling(res_unit(c4, 'conv5'), (2, 2)) c6 = res_unit(c5, 'conv6') pl = F.average_pooling(c6, (4, 4)) with nn.parameter_scope('classifier'): y = bn(PF.binary_connect_affine(pl, 10)) return y
def mnist_binary_connect_resnet_prediction(image, test=False): '\n \n ' def bn(x): return PF.batch_normalization(x, batch_stat=(not test)) def res_unit(x, scope): C = x.shape[1] with nn.parameter_scope(scope): with nn.parameter_scope('conv1'): h = F.elu(bn(PF.binary_connect_convolution(x, (C / 2), (1, 1), with_bias=False))) with nn.parameter_scope('conv2'): h = F.elu(bn(PF.binary_connect_convolution(h, (C / 2), (3, 3), pad=(1, 1), with_bias=False))) with nn.parameter_scope('conv3'): h = bn(PF.binary_connect_convolution(h, C, (1, 1), with_bias=False)) return F.elu((x + h)) with nn.parameter_scope('conv1'): c1 = F.elu(bn(PF.binary_connect_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False))) c2 = F.max_pooling(res_unit(c1, 'conv2'), (2, 2)) c3 = F.max_pooling(res_unit(c2, 'conv3'), (2, 2)) c4 = res_unit(c3, 'conv4') c5 = F.max_pooling(res_unit(c4, 'conv5'), (2, 2)) c6 = res_unit(c5, 'conv6') pl = F.average_pooling(c6, (4, 4)) with nn.parameter_scope('classifier'): y = bn(PF.binary_connect_affine(pl, 10)) return y<|docstring|>Construct ResNet for MNIST (BinaryNet version).<|endoftext|>
0cf7ad2acceb70c53a06627430e6b269f726a982bdf6387c17087483c2b03f93
def mnist_binary_net_lenet_prediction(image, test=False): '\n Construct LeNet for MNIST (BinaryNet version).\n ' with nn.parameter_scope('conv1'): c1 = PF.binary_connect_convolution(image, 16, (5, 5)) c1 = PF.batch_normalization(c1, batch_stat=(not test)) c1 = F.binary_tanh(F.average_pooling(c1, (2, 2))) with nn.parameter_scope('conv2'): c2 = PF.binary_connect_convolution(c1, 16, (5, 5)) c2 = PF.batch_normalization(c2, batch_stat=(not test)) c2 = F.binary_tanh(F.average_pooling(c2, (2, 2))) with nn.parameter_scope('fc3'): c3 = PF.binary_connect_affine(c2, 50) c3 = PF.batch_normalization(c3, batch_stat=(not test)) c3 = F.binary_tanh(c3) with nn.parameter_scope('fc4'): c4 = PF.binary_connect_affine(c3, 10) c4 = PF.batch_normalization(c4, batch_stat=(not test)) return c4
Construct LeNet for MNIST (BinaryNet version).
mnist-collection/classification_bnn.py
mnist_binary_net_lenet_prediction
saulocatharino/nnabla-examples
228
python
def mnist_binary_net_lenet_prediction(image, test=False): '\n \n ' with nn.parameter_scope('conv1'): c1 = PF.binary_connect_convolution(image, 16, (5, 5)) c1 = PF.batch_normalization(c1, batch_stat=(not test)) c1 = F.binary_tanh(F.average_pooling(c1, (2, 2))) with nn.parameter_scope('conv2'): c2 = PF.binary_connect_convolution(c1, 16, (5, 5)) c2 = PF.batch_normalization(c2, batch_stat=(not test)) c2 = F.binary_tanh(F.average_pooling(c2, (2, 2))) with nn.parameter_scope('fc3'): c3 = PF.binary_connect_affine(c2, 50) c3 = PF.batch_normalization(c3, batch_stat=(not test)) c3 = F.binary_tanh(c3) with nn.parameter_scope('fc4'): c4 = PF.binary_connect_affine(c3, 10) c4 = PF.batch_normalization(c4, batch_stat=(not test)) return c4
def mnist_binary_net_lenet_prediction(image, test=False): '\n \n ' with nn.parameter_scope('conv1'): c1 = PF.binary_connect_convolution(image, 16, (5, 5)) c1 = PF.batch_normalization(c1, batch_stat=(not test)) c1 = F.binary_tanh(F.average_pooling(c1, (2, 2))) with nn.parameter_scope('conv2'): c2 = PF.binary_connect_convolution(c1, 16, (5, 5)) c2 = PF.batch_normalization(c2, batch_stat=(not test)) c2 = F.binary_tanh(F.average_pooling(c2, (2, 2))) with nn.parameter_scope('fc3'): c3 = PF.binary_connect_affine(c2, 50) c3 = PF.batch_normalization(c3, batch_stat=(not test)) c3 = F.binary_tanh(c3) with nn.parameter_scope('fc4'): c4 = PF.binary_connect_affine(c3, 10) c4 = PF.batch_normalization(c4, batch_stat=(not test)) return c4<|docstring|>Construct LeNet for MNIST (BinaryNet version).<|endoftext|>
5b705b00e30647bc8ae1964a57b01f3c5660e3e6e501741ba956dd700ac22f35
def mnist_binary_net_resnet_prediction(image, test=False): '\n Construct ResNet for MNIST (BinaryNet version).\n ' def bn(x): return PF.batch_normalization(x, batch_stat=(not test)) def res_unit(x, scope): C = x.shape[1] with nn.parameter_scope(scope): with nn.parameter_scope('conv1'): h = F.binary_tanh(bn(PF.binary_connect_convolution(x, (C / 2), (1, 1), with_bias=False))) with nn.parameter_scope('conv2'): h = F.binary_tanh(bn(PF.binary_connect_convolution(h, (C / 2), (3, 3), pad=(1, 1), with_bias=False))) with nn.parameter_scope('conv3'): h = bn(PF.binary_connect_convolution(h, C, (1, 1), with_bias=False)) return F.binary_tanh((x + h)) with nn.parameter_scope('conv1'): c1 = F.binary_tanh(bn(PF.binary_connect_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False))) c2 = F.max_pooling(res_unit(c1, 'conv2'), (2, 2)) c3 = F.max_pooling(res_unit(c2, 'conv3'), (2, 2)) c4 = res_unit(c3, 'conv4') c5 = F.max_pooling(res_unit(c4, 'conv5'), (2, 2)) c6 = res_unit(c5, 'conv6') pl = F.average_pooling(c6, (4, 4)) with nn.parameter_scope('classifier'): y = bn(PF.binary_connect_affine(pl, 10)) return y
Construct ResNet for MNIST (BinaryNet version).
mnist-collection/classification_bnn.py
mnist_binary_net_resnet_prediction
saulocatharino/nnabla-examples
228
python
def mnist_binary_net_resnet_prediction(image, test=False): '\n \n ' def bn(x): return PF.batch_normalization(x, batch_stat=(not test)) def res_unit(x, scope): C = x.shape[1] with nn.parameter_scope(scope): with nn.parameter_scope('conv1'): h = F.binary_tanh(bn(PF.binary_connect_convolution(x, (C / 2), (1, 1), with_bias=False))) with nn.parameter_scope('conv2'): h = F.binary_tanh(bn(PF.binary_connect_convolution(h, (C / 2), (3, 3), pad=(1, 1), with_bias=False))) with nn.parameter_scope('conv3'): h = bn(PF.binary_connect_convolution(h, C, (1, 1), with_bias=False)) return F.binary_tanh((x + h)) with nn.parameter_scope('conv1'): c1 = F.binary_tanh(bn(PF.binary_connect_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False))) c2 = F.max_pooling(res_unit(c1, 'conv2'), (2, 2)) c3 = F.max_pooling(res_unit(c2, 'conv3'), (2, 2)) c4 = res_unit(c3, 'conv4') c5 = F.max_pooling(res_unit(c4, 'conv5'), (2, 2)) c6 = res_unit(c5, 'conv6') pl = F.average_pooling(c6, (4, 4)) with nn.parameter_scope('classifier'): y = bn(PF.binary_connect_affine(pl, 10)) return y
def mnist_binary_net_resnet_prediction(image, test=False): '\n \n ' def bn(x): return PF.batch_normalization(x, batch_stat=(not test)) def res_unit(x, scope): C = x.shape[1] with nn.parameter_scope(scope): with nn.parameter_scope('conv1'): h = F.binary_tanh(bn(PF.binary_connect_convolution(x, (C / 2), (1, 1), with_bias=False))) with nn.parameter_scope('conv2'): h = F.binary_tanh(bn(PF.binary_connect_convolution(h, (C / 2), (3, 3), pad=(1, 1), with_bias=False))) with nn.parameter_scope('conv3'): h = bn(PF.binary_connect_convolution(h, C, (1, 1), with_bias=False)) return F.binary_tanh((x + h)) with nn.parameter_scope('conv1'): c1 = F.binary_tanh(bn(PF.binary_connect_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False))) c2 = F.max_pooling(res_unit(c1, 'conv2'), (2, 2)) c3 = F.max_pooling(res_unit(c2, 'conv3'), (2, 2)) c4 = res_unit(c3, 'conv4') c5 = F.max_pooling(res_unit(c4, 'conv5'), (2, 2)) c6 = res_unit(c5, 'conv6') pl = F.average_pooling(c6, (4, 4)) with nn.parameter_scope('classifier'): y = bn(PF.binary_connect_affine(pl, 10)) return y<|docstring|>Construct ResNet for MNIST (BinaryNet version).<|endoftext|>
3d6b1c13f621c03aef3b0d94e99491bcd9a8a785cbb1190d1052348dae111d98
def mnist_binary_weight_lenet_prediction(image, test=False): '\n Construct LeNet for MNIST (Binary Weight Network version).\n ' with nn.parameter_scope('conv1'): c1 = PF.binary_weight_convolution(image, 16, (5, 5)) c1 = F.elu(F.average_pooling(c1, (2, 2))) with nn.parameter_scope('conv2'): c2 = PF.binary_weight_convolution(c1, 16, (5, 5)) c2 = F.elu(F.average_pooling(c2, (2, 2))) with nn.parameter_scope('fc3'): c3 = F.elu(PF.binary_weight_affine(c2, 50)) with nn.parameter_scope('fc4'): c4 = PF.binary_weight_affine(c3, 10) return c4
Construct LeNet for MNIST (Binary Weight Network version).
mnist-collection/classification_bnn.py
mnist_binary_weight_lenet_prediction
saulocatharino/nnabla-examples
228
python
def mnist_binary_weight_lenet_prediction(image, test=False): '\n \n ' with nn.parameter_scope('conv1'): c1 = PF.binary_weight_convolution(image, 16, (5, 5)) c1 = F.elu(F.average_pooling(c1, (2, 2))) with nn.parameter_scope('conv2'): c2 = PF.binary_weight_convolution(c1, 16, (5, 5)) c2 = F.elu(F.average_pooling(c2, (2, 2))) with nn.parameter_scope('fc3'): c3 = F.elu(PF.binary_weight_affine(c2, 50)) with nn.parameter_scope('fc4'): c4 = PF.binary_weight_affine(c3, 10) return c4
def mnist_binary_weight_lenet_prediction(image, test=False): '\n \n ' with nn.parameter_scope('conv1'): c1 = PF.binary_weight_convolution(image, 16, (5, 5)) c1 = F.elu(F.average_pooling(c1, (2, 2))) with nn.parameter_scope('conv2'): c2 = PF.binary_weight_convolution(c1, 16, (5, 5)) c2 = F.elu(F.average_pooling(c2, (2, 2))) with nn.parameter_scope('fc3'): c3 = F.elu(PF.binary_weight_affine(c2, 50)) with nn.parameter_scope('fc4'): c4 = PF.binary_weight_affine(c3, 10) return c4<|docstring|>Construct LeNet for MNIST (Binary Weight Network version).<|endoftext|>
2d48cc3aaf126030a312021917264dcc98f7903c5d1458420d4ed30f141b2d5e
def mnist_binary_weight_resnet_prediction(image, test=False): '\n Construct ResNet for MNIST (Binary Weight Network version).\n ' def bn(x): return PF.batch_normalization(x, batch_stat=(not test)) def res_unit(x, scope): C = x.shape[1] with nn.parameter_scope(scope): with nn.parameter_scope('conv1'): h = F.elu(bn(PF.binary_weight_convolution(x, (C / 2), (1, 1), with_bias=False))) with nn.parameter_scope('conv2'): h = F.elu(bn(PF.binary_weight_convolution(h, (C / 2), (3, 3), pad=(1, 1), with_bias=False))) with nn.parameter_scope('conv3'): h = bn(PF.binary_weight_convolution(h, C, (1, 1), with_bias=False)) return F.elu((x + h)) with nn.parameter_scope('conv1'): c1 = F.elu(bn(PF.binary_weight_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False))) c2 = F.max_pooling(res_unit(c1, 'conv2'), (2, 2)) c3 = F.max_pooling(res_unit(c2, 'conv3'), (2, 2)) c4 = res_unit(c3, 'conv4') c5 = F.max_pooling(res_unit(c4, 'conv5'), (2, 2)) c6 = res_unit(c5, 'conv6') pl = F.average_pooling(c6, (4, 4)) with nn.parameter_scope('classifier'): y = PF.binary_weight_affine(pl, 10) return y
Construct ResNet for MNIST (Binary Weight Network version).
mnist-collection/classification_bnn.py
mnist_binary_weight_resnet_prediction
saulocatharino/nnabla-examples
228
python
def mnist_binary_weight_resnet_prediction(image, test=False): '\n \n ' def bn(x): return PF.batch_normalization(x, batch_stat=(not test)) def res_unit(x, scope): C = x.shape[1] with nn.parameter_scope(scope): with nn.parameter_scope('conv1'): h = F.elu(bn(PF.binary_weight_convolution(x, (C / 2), (1, 1), with_bias=False))) with nn.parameter_scope('conv2'): h = F.elu(bn(PF.binary_weight_convolution(h, (C / 2), (3, 3), pad=(1, 1), with_bias=False))) with nn.parameter_scope('conv3'): h = bn(PF.binary_weight_convolution(h, C, (1, 1), with_bias=False)) return F.elu((x + h)) with nn.parameter_scope('conv1'): c1 = F.elu(bn(PF.binary_weight_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False))) c2 = F.max_pooling(res_unit(c1, 'conv2'), (2, 2)) c3 = F.max_pooling(res_unit(c2, 'conv3'), (2, 2)) c4 = res_unit(c3, 'conv4') c5 = F.max_pooling(res_unit(c4, 'conv5'), (2, 2)) c6 = res_unit(c5, 'conv6') pl = F.average_pooling(c6, (4, 4)) with nn.parameter_scope('classifier'): y = PF.binary_weight_affine(pl, 10) return y
def mnist_binary_weight_resnet_prediction(image, test=False): '\n \n ' def bn(x): return PF.batch_normalization(x, batch_stat=(not test)) def res_unit(x, scope): C = x.shape[1] with nn.parameter_scope(scope): with nn.parameter_scope('conv1'): h = F.elu(bn(PF.binary_weight_convolution(x, (C / 2), (1, 1), with_bias=False))) with nn.parameter_scope('conv2'): h = F.elu(bn(PF.binary_weight_convolution(h, (C / 2), (3, 3), pad=(1, 1), with_bias=False))) with nn.parameter_scope('conv3'): h = bn(PF.binary_weight_convolution(h, C, (1, 1), with_bias=False)) return F.elu((x + h)) with nn.parameter_scope('conv1'): c1 = F.elu(bn(PF.binary_weight_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False))) c2 = F.max_pooling(res_unit(c1, 'conv2'), (2, 2)) c3 = F.max_pooling(res_unit(c2, 'conv3'), (2, 2)) c4 = res_unit(c3, 'conv4') c5 = F.max_pooling(res_unit(c4, 'conv5'), (2, 2)) c6 = res_unit(c5, 'conv6') pl = F.average_pooling(c6, (4, 4)) with nn.parameter_scope('classifier'): y = PF.binary_weight_affine(pl, 10) return y<|docstring|>Construct ResNet for MNIST (Binary Weight Network version).<|endoftext|>
39548ab799c6a52f1ea3993c0788ddf472c35f8560558e21e9c3c34396ab5296
def train(): '\n Main script.\n\n Steps:\n\n * Parse command line arguments.\n * Specify a context for computation.\n * Initialize DataIterator for MNIST.\n * Construct a computation graph for training and validation.\n * Initialize a solver and set parameter variables to it.\n * Create monitor instances for saving and displaying training stats.\n * Training loop\n * Compute error rate for validation data (periodically)\n * Get the next minibatch.\n * Set parameter gradients to zero\n * Execute forwardprop on the training graph.\n * Execute backprop.\n * Solver updates parameters by using gradients computed by backprop.\n * Compute training error\n ' args = get_args(monitor_path='tmp.monitor.bnn') from nnabla.ext_utils import get_extension_context logger.info(('Running in %s' % args.context)) ctx = get_extension_context(args.context, device_id=args.device_id, type_config=args.type_config) nn.set_default_context(ctx) data = data_iterator_mnist(args.batch_size, True) vdata = data_iterator_mnist(args.batch_size, False) mnist_cnn_prediction = mnist_binary_connect_lenet_prediction if (args.net == 'bincon'): mnist_cnn_prediction = mnist_binary_connect_lenet_prediction elif (args.net == 'binnet'): mnist_cnn_prediction = mnist_binary_net_lenet_prediction elif (args.net == 'bwn'): mnist_cnn_prediction = mnist_binary_weight_lenet_prediction elif (args.net == 'bincon_resnet'): mnist_cnn_prediction = mnist_binary_connect_resnet_prediction elif (args.net == 'binnet_resnet'): mnist_cnn_prediction = mnist_binary_net_resnet_prediction elif (args.net == 'bwn_resnet'): mnist_cnn_prediction = mnist_binary_weight_resnet_prediction image = nn.Variable([args.batch_size, 1, 28, 28]) label = nn.Variable([args.batch_size, 1]) pred = mnist_cnn_prediction((image / 255), test=False) pred.persistent = True loss = F.mean(F.softmax_cross_entropy(pred, label)) vimage = nn.Variable([args.batch_size, 1, 28, 28]) vlabel = nn.Variable([args.batch_size, 1]) vpred = mnist_cnn_prediction((vimage / 255), test=True) solver = S.Adam(args.learning_rate) solver.set_parameters(nn.get_parameters()) start_point = 0 if (args.checkpoint is not None): start_point = load_checkpoint(args.checkpoint, solver) import nnabla.monitor as M monitor = M.Monitor(args.monitor_path) monitor_loss = M.MonitorSeries('Training loss', monitor, interval=10) monitor_err = M.MonitorSeries('Training error', monitor, interval=10) monitor_time = M.MonitorTimeElapsed('Training time', monitor, interval=100) monitor_verr = M.MonitorSeries('Test error', monitor, interval=10) contents = save_nnp({'x': vimage}, {'y': vpred}, args.batch_size) save.save(os.path.join(args.model_save_path, '{}_result_epoch0.nnp'.format(args.net)), contents) for i in range(start_point, args.max_iter): if ((i % args.val_interval) == 0): ve = 0.0 for j in range(args.val_iter): (vimage.d, vlabel.d) = vdata.next() vpred.forward(clear_buffer=True) ve += categorical_error(vpred.d, vlabel.d) monitor_verr.add(i, (ve / args.val_iter)) if ((i % args.model_save_interval) == 0): save_checkpoint(args.model_save_path, i, solver) (image.d, label.d) = data.next() solver.zero_grad() loss.forward(clear_no_need_grad=True) loss.backward(clear_buffer=True) solver.weight_decay(args.weight_decay) solver.update() e = categorical_error(pred.d, label.d) monitor_loss.add(i, loss.d.copy()) monitor_err.add(i, e) monitor_time.add(i) parameter_file = os.path.join(args.model_save_path, ('params_%06d.h5' % args.max_iter)) nn.save_parameters(parameter_file) contents = save_nnp({'x': vimage}, {'y': vpred}, args.batch_size) save.save(os.path.join(args.model_save_path, '{}_result.nnp'.format(args.net)), contents)
Main script. Steps: * Parse command line arguments. * Specify a context for computation. * Initialize DataIterator for MNIST. * Construct a computation graph for training and validation. * Initialize a solver and set parameter variables to it. * Create monitor instances for saving and displaying training stats. * Training loop * Compute error rate for validation data (periodically) * Get the next minibatch. * Set parameter gradients to zero * Execute forwardprop on the training graph. * Execute backprop. * Solver updates parameters by using gradients computed by backprop. * Compute training error
mnist-collection/classification_bnn.py
train
saulocatharino/nnabla-examples
228
python
def train(): '\n Main script.\n\n Steps:\n\n * Parse command line arguments.\n * Specify a context for computation.\n * Initialize DataIterator for MNIST.\n * Construct a computation graph for training and validation.\n * Initialize a solver and set parameter variables to it.\n * Create monitor instances for saving and displaying training stats.\n * Training loop\n * Compute error rate for validation data (periodically)\n * Get the next minibatch.\n * Set parameter gradients to zero\n * Execute forwardprop on the training graph.\n * Execute backprop.\n * Solver updates parameters by using gradients computed by backprop.\n * Compute training error\n ' args = get_args(monitor_path='tmp.monitor.bnn') from nnabla.ext_utils import get_extension_context logger.info(('Running in %s' % args.context)) ctx = get_extension_context(args.context, device_id=args.device_id, type_config=args.type_config) nn.set_default_context(ctx) data = data_iterator_mnist(args.batch_size, True) vdata = data_iterator_mnist(args.batch_size, False) mnist_cnn_prediction = mnist_binary_connect_lenet_prediction if (args.net == 'bincon'): mnist_cnn_prediction = mnist_binary_connect_lenet_prediction elif (args.net == 'binnet'): mnist_cnn_prediction = mnist_binary_net_lenet_prediction elif (args.net == 'bwn'): mnist_cnn_prediction = mnist_binary_weight_lenet_prediction elif (args.net == 'bincon_resnet'): mnist_cnn_prediction = mnist_binary_connect_resnet_prediction elif (args.net == 'binnet_resnet'): mnist_cnn_prediction = mnist_binary_net_resnet_prediction elif (args.net == 'bwn_resnet'): mnist_cnn_prediction = mnist_binary_weight_resnet_prediction image = nn.Variable([args.batch_size, 1, 28, 28]) label = nn.Variable([args.batch_size, 1]) pred = mnist_cnn_prediction((image / 255), test=False) pred.persistent = True loss = F.mean(F.softmax_cross_entropy(pred, label)) vimage = nn.Variable([args.batch_size, 1, 28, 28]) vlabel = nn.Variable([args.batch_size, 1]) vpred = mnist_cnn_prediction((vimage / 255), test=True) solver = S.Adam(args.learning_rate) solver.set_parameters(nn.get_parameters()) start_point = 0 if (args.checkpoint is not None): start_point = load_checkpoint(args.checkpoint, solver) import nnabla.monitor as M monitor = M.Monitor(args.monitor_path) monitor_loss = M.MonitorSeries('Training loss', monitor, interval=10) monitor_err = M.MonitorSeries('Training error', monitor, interval=10) monitor_time = M.MonitorTimeElapsed('Training time', monitor, interval=100) monitor_verr = M.MonitorSeries('Test error', monitor, interval=10) contents = save_nnp({'x': vimage}, {'y': vpred}, args.batch_size) save.save(os.path.join(args.model_save_path, '{}_result_epoch0.nnp'.format(args.net)), contents) for i in range(start_point, args.max_iter): if ((i % args.val_interval) == 0): ve = 0.0 for j in range(args.val_iter): (vimage.d, vlabel.d) = vdata.next() vpred.forward(clear_buffer=True) ve += categorical_error(vpred.d, vlabel.d) monitor_verr.add(i, (ve / args.val_iter)) if ((i % args.model_save_interval) == 0): save_checkpoint(args.model_save_path, i, solver) (image.d, label.d) = data.next() solver.zero_grad() loss.forward(clear_no_need_grad=True) loss.backward(clear_buffer=True) solver.weight_decay(args.weight_decay) solver.update() e = categorical_error(pred.d, label.d) monitor_loss.add(i, loss.d.copy()) monitor_err.add(i, e) monitor_time.add(i) parameter_file = os.path.join(args.model_save_path, ('params_%06d.h5' % args.max_iter)) nn.save_parameters(parameter_file) contents = save_nnp({'x': vimage}, {'y': vpred}, args.batch_size) save.save(os.path.join(args.model_save_path, '{}_result.nnp'.format(args.net)), contents)
def train(): '\n Main script.\n\n Steps:\n\n * Parse command line arguments.\n * Specify a context for computation.\n * Initialize DataIterator for MNIST.\n * Construct a computation graph for training and validation.\n * Initialize a solver and set parameter variables to it.\n * Create monitor instances for saving and displaying training stats.\n * Training loop\n * Compute error rate for validation data (periodically)\n * Get the next minibatch.\n * Set parameter gradients to zero\n * Execute forwardprop on the training graph.\n * Execute backprop.\n * Solver updates parameters by using gradients computed by backprop.\n * Compute training error\n ' args = get_args(monitor_path='tmp.monitor.bnn') from nnabla.ext_utils import get_extension_context logger.info(('Running in %s' % args.context)) ctx = get_extension_context(args.context, device_id=args.device_id, type_config=args.type_config) nn.set_default_context(ctx) data = data_iterator_mnist(args.batch_size, True) vdata = data_iterator_mnist(args.batch_size, False) mnist_cnn_prediction = mnist_binary_connect_lenet_prediction if (args.net == 'bincon'): mnist_cnn_prediction = mnist_binary_connect_lenet_prediction elif (args.net == 'binnet'): mnist_cnn_prediction = mnist_binary_net_lenet_prediction elif (args.net == 'bwn'): mnist_cnn_prediction = mnist_binary_weight_lenet_prediction elif (args.net == 'bincon_resnet'): mnist_cnn_prediction = mnist_binary_connect_resnet_prediction elif (args.net == 'binnet_resnet'): mnist_cnn_prediction = mnist_binary_net_resnet_prediction elif (args.net == 'bwn_resnet'): mnist_cnn_prediction = mnist_binary_weight_resnet_prediction image = nn.Variable([args.batch_size, 1, 28, 28]) label = nn.Variable([args.batch_size, 1]) pred = mnist_cnn_prediction((image / 255), test=False) pred.persistent = True loss = F.mean(F.softmax_cross_entropy(pred, label)) vimage = nn.Variable([args.batch_size, 1, 28, 28]) vlabel = nn.Variable([args.batch_size, 1]) vpred = mnist_cnn_prediction((vimage / 255), test=True) solver = S.Adam(args.learning_rate) solver.set_parameters(nn.get_parameters()) start_point = 0 if (args.checkpoint is not None): start_point = load_checkpoint(args.checkpoint, solver) import nnabla.monitor as M monitor = M.Monitor(args.monitor_path) monitor_loss = M.MonitorSeries('Training loss', monitor, interval=10) monitor_err = M.MonitorSeries('Training error', monitor, interval=10) monitor_time = M.MonitorTimeElapsed('Training time', monitor, interval=100) monitor_verr = M.MonitorSeries('Test error', monitor, interval=10) contents = save_nnp({'x': vimage}, {'y': vpred}, args.batch_size) save.save(os.path.join(args.model_save_path, '{}_result_epoch0.nnp'.format(args.net)), contents) for i in range(start_point, args.max_iter): if ((i % args.val_interval) == 0): ve = 0.0 for j in range(args.val_iter): (vimage.d, vlabel.d) = vdata.next() vpred.forward(clear_buffer=True) ve += categorical_error(vpred.d, vlabel.d) monitor_verr.add(i, (ve / args.val_iter)) if ((i % args.model_save_interval) == 0): save_checkpoint(args.model_save_path, i, solver) (image.d, label.d) = data.next() solver.zero_grad() loss.forward(clear_no_need_grad=True) loss.backward(clear_buffer=True) solver.weight_decay(args.weight_decay) solver.update() e = categorical_error(pred.d, label.d) monitor_loss.add(i, loss.d.copy()) monitor_err.add(i, e) monitor_time.add(i) parameter_file = os.path.join(args.model_save_path, ('params_%06d.h5' % args.max_iter)) nn.save_parameters(parameter_file) contents = save_nnp({'x': vimage}, {'y': vpred}, args.batch_size) save.save(os.path.join(args.model_save_path, '{}_result.nnp'.format(args.net)), contents)<|docstring|>Main script. Steps: * Parse command line arguments. * Specify a context for computation. * Initialize DataIterator for MNIST. * Construct a computation graph for training and validation. * Initialize a solver and set parameter variables to it. * Create monitor instances for saving and displaying training stats. * Training loop * Compute error rate for validation data (periodically) * Get the next minibatch. * Set parameter gradients to zero * Execute forwardprop on the training graph. * Execute backprop. * Solver updates parameters by using gradients computed by backprop. * Compute training error<|endoftext|>
23abf706f815274a233a0442813bcec8b32eb151779ca6993733e7f7d3153303
def sort_by_seq_lens(args, batch, sequences_lengths, descending=True): "\n Sort a batch of padded variable length sequences by their length.\n\n Args:\n batch: A batch of padded variable length sequences. The batch should\n have the dimensions (batch_size x max_sequence_length x *).\n sequences_lengths: A tensor containing the lengths of the sequences in the\n input batch. The tensor should be of size (batch_size).\n descending: A boolean value indicating whether to sort the sequences\n by their lengths in descending order. Defaults to True.\n\n Returns:\n sorted_batch: A tensor containing the input batch reordered by\n sequences lengths.\n sorted_seq_lens: A tensor containing the sorted lengths of the\n sequences in the input batch.\n sorting_idx: A tensor containing the indices used to permute the input\n batch in order to get 'sorted_batch'.\n restoration_idx: A tensor containing the indices that can be used to\n restore the order of the sequences in 'sorted_batch' so that it\n matches the input batch.\n " (sorted_seq_lens, sorting_index) = sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0, sorting_index) idx_range = sequences_lengths.new_tensor(torch.arange(0, len(sequences_lengths))).to(args.device) (_, reverse_mapping) = sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0, reverse_mapping) return (sorted_batch, sorted_seq_lens, sorting_index, restoration_index)
Sort a batch of padded variable length sequences by their length. Args: batch: A batch of padded variable length sequences. The batch should have the dimensions (batch_size x max_sequence_length x *). sequences_lengths: A tensor containing the lengths of the sequences in the input batch. The tensor should be of size (batch_size). descending: A boolean value indicating whether to sort the sequences by their lengths in descending order. Defaults to True. Returns: sorted_batch: A tensor containing the input batch reordered by sequences lengths. sorted_seq_lens: A tensor containing the sorted lengths of the sequences in the input batch. sorting_idx: A tensor containing the indices used to permute the input batch in order to get 'sorted_batch'. restoration_idx: A tensor containing the indices that can be used to restore the order of the sequences in 'sorted_batch' so that it matches the input batch.
net/bert_lstm.py
sort_by_seq_lens
Wchoward/CskER
0
python
def sort_by_seq_lens(args, batch, sequences_lengths, descending=True): "\n Sort a batch of padded variable length sequences by their length.\n\n Args:\n batch: A batch of padded variable length sequences. The batch should\n have the dimensions (batch_size x max_sequence_length x *).\n sequences_lengths: A tensor containing the lengths of the sequences in the\n input batch. The tensor should be of size (batch_size).\n descending: A boolean value indicating whether to sort the sequences\n by their lengths in descending order. Defaults to True.\n\n Returns:\n sorted_batch: A tensor containing the input batch reordered by\n sequences lengths.\n sorted_seq_lens: A tensor containing the sorted lengths of the\n sequences in the input batch.\n sorting_idx: A tensor containing the indices used to permute the input\n batch in order to get 'sorted_batch'.\n restoration_idx: A tensor containing the indices that can be used to\n restore the order of the sequences in 'sorted_batch' so that it\n matches the input batch.\n " (sorted_seq_lens, sorting_index) = sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0, sorting_index) idx_range = sequences_lengths.new_tensor(torch.arange(0, len(sequences_lengths))).to(args.device) (_, reverse_mapping) = sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0, reverse_mapping) return (sorted_batch, sorted_seq_lens, sorting_index, restoration_index)
def sort_by_seq_lens(args, batch, sequences_lengths, descending=True): "\n Sort a batch of padded variable length sequences by their length.\n\n Args:\n batch: A batch of padded variable length sequences. The batch should\n have the dimensions (batch_size x max_sequence_length x *).\n sequences_lengths: A tensor containing the lengths of the sequences in the\n input batch. The tensor should be of size (batch_size).\n descending: A boolean value indicating whether to sort the sequences\n by their lengths in descending order. Defaults to True.\n\n Returns:\n sorted_batch: A tensor containing the input batch reordered by\n sequences lengths.\n sorted_seq_lens: A tensor containing the sorted lengths of the\n sequences in the input batch.\n sorting_idx: A tensor containing the indices used to permute the input\n batch in order to get 'sorted_batch'.\n restoration_idx: A tensor containing the indices that can be used to\n restore the order of the sequences in 'sorted_batch' so that it\n matches the input batch.\n " (sorted_seq_lens, sorting_index) = sequences_lengths.sort(0, descending=descending) sorted_batch = batch.index_select(0, sorting_index) idx_range = sequences_lengths.new_tensor(torch.arange(0, len(sequences_lengths))).to(args.device) (_, reverse_mapping) = sorting_index.sort(0, descending=False) restoration_index = idx_range.index_select(0, reverse_mapping) return (sorted_batch, sorted_seq_lens, sorting_index, restoration_index)<|docstring|>Sort a batch of padded variable length sequences by their length. Args: batch: A batch of padded variable length sequences. The batch should have the dimensions (batch_size x max_sequence_length x *). sequences_lengths: A tensor containing the lengths of the sequences in the input batch. The tensor should be of size (batch_size). descending: A boolean value indicating whether to sort the sequences by their lengths in descending order. Defaults to True. Returns: sorted_batch: A tensor containing the input batch reordered by sequences lengths. sorted_seq_lens: A tensor containing the sorted lengths of the sequences in the input batch. sorting_idx: A tensor containing the indices used to permute the input batch in order to get 'sorted_batch'. restoration_idx: A tensor containing the indices that can be used to restore the order of the sequences in 'sorted_batch' so that it matches the input batch.<|endoftext|>
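A round-trip check showing that restoration_idx undoes the sort. Since args is only consulted for its .device (an assumption based on the body above), a tiny stand-in namespace suffices:

    import torch
    from types import SimpleNamespace

    args = SimpleNamespace(device='cpu')
    batch = torch.tensor([[1., 1., 0.],
                          [2., 2., 2.],
                          [3., 0., 0.]])        # rows padded with zeros
    lengths = torch.tensor([2, 3, 1])
    sorted_batch, sorted_lens, sort_idx, restore_idx = sort_by_seq_lens(args, batch, lengths)
    assert torch.equal(sorted_batch.index_select(0, restore_idx), batch)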
afbedcde32341c1168df7233a690382f6b51577e877a03f7acdc7870f6851f43
def challenge_get(self, environ, start_response): '\n Respond to a GET request by sending a form.\n ' redirect = environ['tiddlyweb.query'].get('tiddlyweb_redirect', ['/'])[0] return self._send_cookie_form(environ, start_response, redirect)
Respond to a GET request by sending a form.
tiddlyweb/web/challengers/cookie_form.py
challenge_get
angeluseve/tiddlyweb
1
python
def challenge_get(self, environ, start_response): '\n \n ' redirect = environ['tiddlyweb.query'].get('tiddlyweb_redirect', ['/'])[0] return self._send_cookie_form(environ, start_response, redirect)
def challenge_get(self, environ, start_response): '\n \n ' redirect = environ['tiddlyweb.query'].get('tiddlyweb_redirect', ['/'])[0] return self._send_cookie_form(environ, start_response, redirect)<|docstring|>Respond to a GET request by sending a form.<|endoftext|>
87d23ef54187c576c80c86764d0de9540e90293372bd463a7daa2cdc3cd7dede
def challenge_post(self, environ, start_response): '\n Respond to a POST by processing data sent from a form.\n The form should include a username and password. If it\n does not, send the form again. If it does, validate\n the data.\n ' query = environ['tiddlyweb.query'] redirect = query.get('tiddlyweb_redirect', ['/'])[0] try: user = query['user'][0] password = query['password'][0] return self._validate_and_redirect(environ, start_response, user, password, redirect) except KeyError: return self._send_cookie_form(environ, start_response, redirect, '401 Unauthorized')
Respond to a POST by processing data sent from a form. The form should include a username and password. If it does not, send the form again. If it does, validate the data.
tiddlyweb/web/challengers/cookie_form.py
challenge_post
angeluseve/tiddlyweb
1
python
def challenge_post(self, environ, start_response): '\n Respond to a POST by processing data sent from a form.\n The form should include a username and password. If it\n does not, send the form again. If it does, validate\n the data.\n ' query = environ['tiddlyweb.query'] redirect = query.get('tiddlyweb_redirect', ['/'])[0] try: user = query['user'][0] password = query['password'][0] return self._validate_and_redirect(environ, start_response, user, password, redirect) except KeyError: return self._send_cookie_form(environ, start_response, redirect, '401 Unauthorized')
def challenge_post(self, environ, start_response): '\n Respond to a POST by processing data sent from a form.\n The form should include a username and password. If it\n does not, send the form again. If it does, validate\n the data.\n ' query = environ['tiddlyweb.query'] redirect = query.get('tiddlyweb_redirect', ['/'])[0] try: user = query['user'][0] password = query['password'][0] return self._validate_and_redirect(environ, start_response, user, password, redirect) except KeyError: return self._send_cookie_form(environ, start_response, redirect, '401 Unauthorized')<|docstring|>Respond to a POST by processing data sent from a form. The form should include a username and password. If it does not, send the form again. If it does, validate the data.<|endoftext|>
ae6e6fbd2328994994a4407421e633153547742a6b45e1b671d1fccca311d46c
def _send_cookie_form(self, environ, start_response, redirect, status='200 OK', message=''): '\n Send a simple form to the client asking for a username\n and password.\n ' start_response(status, [('Content-Type', 'text/html')]) environ['tiddlyweb.title'] = 'Cookie Based Login' return [('\n<pre>\n%s\n<form action="" method="POST">\nUser: <input name="user" size="40" />\nPassword <input type="password" name="password" size="40" />\n<input type="hidden" name="tiddlyweb_redirect" value="%s" />\n<input type="submit" value="submit" />\n</form>\n</pre>\n' % (message, redirect))]
Send a simple form to the client asking for a username and password.
tiddlyweb/web/challengers/cookie_form.py
_send_cookie_form
angeluseve/tiddlyweb
1
python
def _send_cookie_form(self, environ, start_response, redirect, status='200 OK', message=''): '\n Send a simple form to the client asking for a username\n and password.\n ' start_response(status, [('Content-Type', 'text/html')]) environ['tiddlyweb.title'] = 'Cookie Based Login' return [('\n<pre>\n%s\n<form action="" method="POST">\nUser: <input name="user" size="40" />\nPassword <input type="password" name="password" size="40" />\n<input type="hidden" name="tiddlyweb_redirect" value="%s" />\n<input type="submit" value="submit" />\n</form>\n</pre>\n' % (message, redirect))]
def _send_cookie_form(self, environ, start_response, redirect, status='200 OK', message=''): '\n Send a simple form to the client asking for a username\n and password.\n ' start_response(status, [('Content-Type', 'text/html')]) environ['tiddlyweb.title'] = 'Cookie Based Login' return [('\n<pre>\n%s\n<form action="" method="POST">\nUser: <input name="user" size="40" />\nPassword <input type="password" name="password" size="40" />\n<input type="hidden" name="tiddlyweb_redirect" value="%s" />\n<input type="submit" value="submit" />\n</form>\n</pre>\n' % (message, redirect))]<|docstring|>Send a simple form to the client asking for a username and password.<|endoftext|>
16d747957b8749eabdf31fefad450c09d2ac1de4bceb8c174cbc42e40b31d489
def _validate_and_redirect(self, environ, start_response, username, password, redirect): '\n Check a username and password. If valid, send a cookie\n to the client. If it is not, send the form again.\n ' status = '401 Unauthorized' try: store = environ['tiddlyweb.store'] secret = environ['tiddlyweb.config']['secret'] user = User(username) user = store.get(user) if user.check_password(password): uri = ('%s%s' % (server_host_url(environ), redirect)) cookie = Cookie.SimpleCookie() secret_string = sha(('%s%s' % (user.usersign, secret))).hexdigest() cookie['tiddlyweb_user'] = ('%s:%s' % (user.usersign, secret_string)) cookie['tiddlyweb_user']['path'] = self._cookie_path(environ) logging.debug(('303 to %s' % uri)) start_response('303 Other', [('Set-Cookie', cookie.output(header='')), ('Location', uri)]) return [uri] except KeyError: pass except NoUserError: pass return self._send_cookie_form(environ, start_response, redirect, status, 'User or Password no good')
Check a username and password. If valid, send a cookie to the client. If it is not, send the form again.
tiddlyweb/web/challengers/cookie_form.py
_validate_and_redirect
angeluseve/tiddlyweb
1
python
def _validate_and_redirect(self, environ, start_response, username, password, redirect): '\n Check a username and password. If valid, send a cookie\n to the client. If it is not, send the form again.\n ' status = '401 Unauthorized' try: store = environ['tiddlyweb.store'] secret = environ['tiddlyweb.config']['secret'] user = User(username) user = store.get(user) if user.check_password(password): uri = ('%s%s' % (server_host_url(environ), redirect)) cookie = Cookie.SimpleCookie() secret_string = sha(('%s%s' % (user.usersign, secret))).hexdigest() cookie['tiddlyweb_user'] = ('%s:%s' % (user.usersign, secret_string)) cookie['tiddlyweb_user']['path'] = self._cookie_path(environ) logging.debug(('303 to %s' % uri)) start_response('303 Other', [('Set-Cookie', cookie.output(header=)), ('Location', uri)]) return [uri] except KeyError: pass except NoUserError: pass return self._send_cookie_form(environ, start_response, redirect, status, 'User or Password no good')
def _validate_and_redirect(self, environ, start_response, username, password, redirect): '\n Check a username and password. If valid, send a cookie\n to the client. If it is not, send the form again.\n ' status = '401 Unauthorized' try: store = environ['tiddlyweb.store'] secret = environ['tiddlyweb.config']['secret'] user = User(username) user = store.get(user) if user.check_password(password): uri = ('%s%s' % (server_host_url(environ), redirect)) cookie = Cookie.SimpleCookie() secret_string = sha(('%s%s' % (user.usersign, secret))).hexdigest() cookie['tiddlyweb_user'] = ('%s:%s' % (user.usersign, secret_string)) cookie['tiddlyweb_user']['path'] = self._cookie_path(environ) logging.debug(('303 to %s' % uri)) start_response('303 Other', [('Set-Cookie', cookie.output(header='')), ('Location', uri)]) return [uri] except KeyError: pass except NoUserError: pass return self._send_cookie_form(environ, start_response, redirect, status, 'User or Password no good')<|docstring|>Check a username and password. If valid, send a cookie to the client. If it is not, send the form again.<|endoftext|>
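The cookie value set here follows the pattern usersign:sha(usersign + secret). A hedged sketch of that signing and verification scheme; hashlib.sha1 is assumed to match the sha() call in the record, and sign_user/verify_cookie are illustrative names, not tiddlyweb APIs:

import hashlib
import hmac

def sign_user(usersign, secret):
    # Digest of the usersign concatenated with the server secret, as above.
    digest = hashlib.sha1(('%s%s' % (usersign, secret)).encode('utf-8')).hexdigest()
    return '%s:%s' % (usersign, digest)

def verify_cookie(cookie_value, secret):
    usersign, _, digest = cookie_value.partition(':')
    expected = hashlib.sha1(('%s%s' % (usersign, secret)).encode('utf-8')).hexdigest()
    # compare_digest avoids timing leaks; a plain == comparison is what
    # simple cookie-checking code would otherwise do on later requests.
    return hmac.compare_digest(digest, expected)

token = sign_user('cdent', 's3cret')
assert verify_cookie(token, 's3cret')
assert not verify_cookie(token, 'wrong-secret')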
3e4196c6bdb3a15538a027861080e81f7e870323ee03f964d0ba9b8225d12d5c
def test_download_mimic_demo(mimic_demo_path, mimic_demo_url, mimic_tables): '\n Download the MIMIC demo to a local folder.\n ' r = urllib.request.urlopen(f'{mimic_demo_url}SHA256SUMS.txt') sha_values = r.read().decode('utf-8').rstrip('\n') sha_values = [x.split(' ') for x in sha_values.split('\n')] sha_fn = [x[1] for x in sha_values] sha_values = [x[0] for x in sha_values] for table in mimic_tables: assert (f'{table}.csv' in sha_fn) idx = sha_fn.index(f'{table}.csv') sha_ref = sha_values[idx] fn = os.path.join(mimic_demo_path, f'{table}.csv') if os.path.exists(fn): fn_sha = get_sha256(fn) if (fn_sha == sha_ref): continue urlretrieve(f'{mimic_demo_url}{table}.csv', fn) fn_sha = get_sha256(fn) assert (fn_sha == sha_ref)
Download the MIMIC demo to a local folder.
mimic-iii/tests/test_postgres_build.py
test_download_mimic_demo
kingpfogel/mimic-code
1,626
python
def test_download_mimic_demo(mimic_demo_path, mimic_demo_url, mimic_tables): '\n \n ' r = urllib.request.urlopen(f'{mimic_demo_url}SHA256SUMS.txt') sha_values = r.read().decode('utf-8').rstrip('\n') sha_values = [x.split(' ') for x in sha_values.split('\n')] sha_fn = [x[1] for x in sha_values] sha_values = [x[0] for x in sha_values] for table in mimic_tables: assert (f'{table}.csv' in sha_fn) idx = sha_fn.index(f'{table}.csv') sha_ref = sha_values[idx] fn = os.path.join(mimic_demo_path, f'{table}.csv') if os.path.exists(fn): fn_sha = get_sha256(fn) if (fn_sha == sha_ref): continue urlretrieve(f'{mimic_demo_url}{table}.csv', fn) fn_sha = get_sha256(fn) assert (fn_sha == sha_ref)
def test_download_mimic_demo(mimic_demo_path, mimic_demo_url, mimic_tables): '\n \n ' r = urllib.request.urlopen(f'{mimic_demo_url}SHA256SUMS.txt') sha_values = r.read().decode('utf-8').rstrip('\n') sha_values = [x.split(' ') for x in sha_values.split('\n')] sha_fn = [x[1] for x in sha_values] sha_values = [x[0] for x in sha_values] for table in mimic_tables: assert (f'{table}.csv' in sha_fn) idx = sha_fn.index(f'{table}.csv') sha_ref = sha_values[idx] fn = os.path.join(mimic_demo_path, f'{table}.csv') if os.path.exists(fn): fn_sha = get_sha256(fn) if (fn_sha == sha_ref): continue urlretrieve(f'{mimic_demo_url}{table}.csv', fn) fn_sha = get_sha256(fn) assert (fn_sha == sha_ref)<|docstring|>Download the MIMIC demo to a local folder.<|endoftext|>
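The download test leans on a get_sha256 helper defined elsewhere in the test module. A plausible chunked implementation, offered as an assumption rather than the repository's exact code:

import hashlib

def get_sha256(filename, blocksize=65536):
    # Read in blocks so multi-gigabyte CSVs never sit wholly in memory.
    sha = hashlib.sha256()
    with open(filename, 'rb') as fp:
        for block in iter(lambda: fp.read(blocksize), b''):
            sha.update(block)
    return sha.hexdigest()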
2c0fb444ee274bbb1fcc28d4bd5b4bf2bc15a6029a064a6927d78f7d54f50f2e
def test_build_mimic_demo(mimic_demo_path, mimic_db_params, create_mimic_db): '\n Try to build MIMIC-III demo using the make file and the downloaded data.\n ' build_path = os.path.join(os.getcwd(), 'mimic-iii', 'buildmimic', 'postgres/') dbname = mimic_db_params['name'] dbpass = mimic_db_params['password'] dbuser = mimic_db_params['user'] dbschema = mimic_db_params['schema'] dbhost = mimic_db_params['host'] make_mimic = f'make mimic datadir={mimic_demo_path} DBNAME={dbname} DBUSER={dbuser} DBPASS={dbpass} DBSCHEMA={dbschema} DBHOST={dbhost}' subprocess.check_output(make_mimic, shell=True, cwd=build_path)
Try to build MIMIC-III demo using the make file and the downloaded data.
mimic-iii/tests/test_postgres_build.py
test_build_mimic_demo
kingpfogel/mimic-code
1,626
python
def test_build_mimic_demo(mimic_demo_path, mimic_db_params, create_mimic_db): '\n \n ' build_path = os.path.join(os.getcwd(), 'mimic-iii', 'buildmimic', 'postgres/') dbname = mimic_db_params['name'] dbpass = mimic_db_params['password'] dbuser = mimic_db_params['user'] dbschema = mimic_db_params['schema'] dbhost = mimic_db_params['host'] make_mimic = f'make mimic datadir={mimic_demo_path} DBNAME={dbname} DBUSER={dbuser} DBPASS={dbpass} DBSCHEMA={dbschema} DBHOST={dbhost}' subprocess.check_output(make_mimic, shell=True, cwd=build_path)
def test_build_mimic_demo(mimic_demo_path, mimic_db_params, create_mimic_db): '\n \n ' build_path = os.path.join(os.getcwd(), 'mimic-iii', 'buildmimic', 'postgres/') dbname = mimic_db_params['name'] dbpass = mimic_db_params['password'] dbuser = mimic_db_params['user'] dbschema = mimic_db_params['schema'] dbhost = mimic_db_params['host'] make_mimic = f'make mimic datadir={mimic_demo_path} DBNAME={dbname} DBUSER={dbuser} DBPASS={dbpass} DBSCHEMA={dbschema} DBHOST={dbhost}' subprocess.check_output(make_mimic, shell=True, cwd=build_path)<|docstring|>Try to build MIMIC-III demo using the make file and the downloaded data.<|endoftext|>
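The make invocation above pulls its connection settings from a mimic_db_params fixture. A hypothetical conftest.py sketch of that fixture; each key mirrors a variable in the make command, but the defaults shown are assumptions:

import pytest

@pytest.fixture(scope='session')
def mimic_db_params():
    # Values are placeholders; the real fixture likely reads environment
    # variables or CI configuration instead of hard-coding credentials.
    return {
        'name': 'mimic',
        'user': 'postgres',
        'password': 'postgres',
        'schema': 'mimiciii',
        'host': 'localhost',
    }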
d8a09e76ecded49d3632d1506fc194184c231e07d0146a2998bda1378d271710
def test_db_con(mimic_con): '\n Check we can select from the database.\n ' test_query = "SELECT 'another hello world';" hello_world = pd.read_sql_query(test_query, mimic_con) assert (hello_world.values[0][0] == 'another hello world')
Check we can select from the database.
mimic-iii/tests/test_postgres_build.py
test_db_con
kingpfogel/mimic-code
1,626
python
def test_db_con(mimic_con): '\n \n ' test_query = "SELECT 'another hello world';" hello_world = pd.read_sql_query(test_query, mimic_con) assert (hello_world.values[0][0] == 'another hello world')
def test_db_con(mimic_con): '\n \n ' test_query = "SELECT 'another hello world';" hello_world = pd.read_sql_query(test_query, mimic_con) assert (hello_world.values[0][0] == 'another hello world')<|docstring|>Check we can select from the database.<|endoftext|>
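Likewise, the mimic_con fixture the queries run through is assumed rather than shown. One way to build it with SQLAlchemy, a sketch under that assumption; the repository may construct the connection differently:

import pytest
import sqlalchemy

@pytest.fixture(scope='session')
def mimic_con(mimic_db_params):
    # pd.read_sql_query accepts a SQLAlchemy connection directly.
    url = 'postgresql://{user}:{password}@{host}/{name}'.format(**mimic_db_params)
    engine = sqlalchemy.create_engine(url)
    con = engine.connect()
    yield con
    con.close()
    engine.dispose()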
4e0e7ed4879536a0c45985b0476713c3e56fa450088703e591dadeef1b02accc
def test_select_min_subject_id(mimic_con, mimic_schema): '\n Minimum subject_id in the demo is 10006\n ' test_query = f''' SELECT min(subject_id) FROM {mimic_schema}.patients; ''' min_id = pd.read_sql_query(test_query, mimic_con) assert (min_id.values[0][0] == 10006)
Minimum subject_id in the demo is 10006
mimic-iii/tests/test_postgres_build.py
test_select_min_subject_id
kingpfogel/mimic-code
1,626
python
def test_select_min_subject_id(mimic_con, mimic_schema): '\n \n ' test_query = f' SELECT min(subject_id) FROM {mimic_schema}.patients; ' min_id = pd.read_sql_query(test_query, mimic_con) assert (min_id.values[0][0] == 10006)
def test_select_min_subject_id(mimic_con, mimic_schema): '\n \n ' test_query = f' SELECT min(subject_id) FROM {mimic_schema}.patients; ' min_id = pd.read_sql_query(test_query, mimic_con) assert (min_id.values[0][0] == 10006)<|docstring|>Minimum subject_id in the demo is 10006<|endoftext|>
cf21ca543ac00d7a5492e9bf094e84ccb08e95218b335c7c6931f87fb0e88aa3
def test_itemids_in_inputevents_cv_are_shifted(mimic_con, mimic_schema): '\n Number of ITEMIDs which were erroneously left as original value\n ' query = f''' SELECT COUNT(*) FROM {mimic_schema}.inputevents_cv WHERE itemid < 30000; ''' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
Number of ITEMIDs which were erroneously left as original value
mimic-iii/tests/test_postgres_build.py
test_itemids_in_inputevents_cv_are_shifted
kingpfogel/mimic-code
1,626
python
def test_itemids_in_inputevents_cv_are_shifted(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.inputevents_cv WHERE itemid < 30000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
def test_itemids_in_inputevents_cv_are_shifted(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.inputevents_cv WHERE itemid < 30000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)<|docstring|>Number of ITEMIDs which were erroneously left as original value<|endoftext|>
72b18f4a5b238e67af4ecf9b0364695a78650b8e927cfda5bf0aed03ca84b248
def test_itemids_in_inputevents_mv_are_shifted(mimic_con, mimic_schema): '\n Number of ITEMIDs which were erroneously left as original value\n ' query = f''' SELECT COUNT(*) FROM {mimic_schema}.inputevents_mv WHERE itemid < 220000; ''' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
Number of ITEMIDs which were erroneously left as original value
mimic-iii/tests/test_postgres_build.py
test_itemids_in_inputevents_mv_are_shifted
kingpfogel/mimic-code
1,626
python
def test_itemids_in_inputevents_mv_are_shifted(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.inputevents_mv WHERE itemid < 220000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
def test_itemids_in_inputevents_mv_are_shifted(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.inputevents_mv WHERE itemid < 220000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)<|docstring|>Number of ITEMIDs which were erroneously left as original value<|endoftext|>
ec0812d63a80d9bfaece38ddb8fde608c5e6685d93dadbe3867fd4d21be87d99
def test_itemids_in_outputevents_are_shifted(mimic_con, mimic_schema): '\n Number of ITEMIDs which were erroneously left as original value\n ' query = f''' SELECT COUNT(*) FROM {mimic_schema}.outputevents WHERE itemid < 30000; ''' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
Number of ITEMIDs which were erroneously left as original value
mimic-iii/tests/test_postgres_build.py
test_itemids_in_outputevents_are_shifted
kingpfogel/mimic-code
1,626
python
def test_itemids_in_outputevents_are_shifted(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.outputevents WHERE itemid < 30000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
def test_itemids_in_outputevents_are_shifted(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.outputevents WHERE itemid < 30000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)<|docstring|>Number of ITEMIDs which were erroneously left as original value<|endoftext|>
80deeea6074aceec3ffee22c25b21f756740b0dc882daa59a2d17184e9eff2d2
def test_itemids_in_inputevents_cv_are_in_range(mimic_con, mimic_schema): '\n Number of ITEMIDs which are above the allowable range\n ' query = f''' SELECT COUNT(*) FROM {mimic_schema}.inputevents_cv WHERE itemid > 50000; ''' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
Number of ITEMIDs which are above the allowable range
mimic-iii/tests/test_postgres_build.py
test_itemids_in_inputevents_cv_are_in_range
kingpfogel/mimic-code
1,626
python
def test_itemids_in_inputevents_cv_are_in_range(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.inputevents_cv WHERE itemid > 50000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
def test_itemids_in_inputevents_cv_are_in_range(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.inputevents_cv WHERE itemid > 50000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)<|docstring|>Number of ITEMIDs which are above the allowable range<|endoftext|>
d08f6abc7ae71ed11e55f4b86055119c6b0acf30054fad3703e3d130961563fd
def test_itemids_in_outputevents_are_in_range(mimic_con, mimic_schema): '\n Number of ITEMIDs which are not in the allowable range\n ' query = f''' SELECT COUNT(*) FROM {mimic_schema}.outputevents WHERE itemid > 50000 AND itemid < 220000; ''' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
Number of ITEMIDs which are not in the allowable range
mimic-iii/tests/test_postgres_build.py
test_itemids_in_outputevents_are_in_range
kingpfogel/mimic-code
1,626
python
def test_itemids_in_outputevents_are_in_range(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.outputevents WHERE itemid > 50000 AND itemid < 220000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
def test_itemids_in_outputevents_are_in_range(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.outputevents WHERE itemid > 50000 AND itemid < 220000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)<|docstring|>Number of ITEMIDs which are not in the allowable range<|endoftext|>
cfe551b6428a1e98950c90c16f05eed4c0743f0a96f98ecc7b4fe35ea068d77a
def test_itemids_in_chartevents_are_in_range(mimic_con, mimic_schema): '\n Number of ITEMIDs which are not in the allowable range\n ' query = f''' SELECT COUNT(*) FROM {mimic_schema}.chartevents WHERE itemid > 20000 AND itemid < 220000; ''' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
Number of ITEMIDs which are not in the allowable range
mimic-iii/tests/test_postgres_build.py
test_itemids_in_chartevents_are_in_range
kingpfogel/mimic-code
1,626
python
def test_itemids_in_chartevents_are_in_range(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.chartevents WHERE itemid > 20000 AND itemid < 220000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
def test_itemids_in_chartevents_are_in_range(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.chartevents WHERE itemid > 20000 AND itemid < 220000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)<|docstring|>Number of ITEMIDs which are not in the allowable range<|endoftext|>
091d3d38319d8098e9d2cd567bbd30ad4e4d01258bf62209637e9d67e49f5e95
def test_itemids_in_procedureevents_mv_are_in_range(mimic_con, mimic_schema): '\n Number of ITEMIDs which are not in the allowable range\n ' query = f''' SELECT COUNT(*) FROM {mimic_schema}.procedureevents_mv WHERE itemid < 220000; ''' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
Number of ITEMIDs which are not in the allowable range
mimic-iii/tests/test_postgres_build.py
test_itemids_in_procedureevents_mv_are_in_range
kingpfogel/mimic-code
1,626
python
def test_itemids_in_procedureevents_mv_are_in_range(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.procedureevents_mv WHERE itemid < 220000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
def test_itemids_in_procedureevents_mv_are_in_range(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.procedureevents_mv WHERE itemid < 220000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)<|docstring|>Number of ITEMIDs which are not in the allowable range<|endoftext|>
ef9f6211af3a2167f925cce07ee906010641ef506e87e0a6cb91432691f9f8c5
def test_itemids_in_labevents_are_in_range(mimic_con, mimic_schema): '\n Number of ITEMIDs which are not in the allowable range\n ' query = f''' SELECT COUNT(*) FROM {mimic_schema}.labevents WHERE itemid < 50000 OR itemid > 60000; ''' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
Number of ITEMIDs which are not in the allowable range
mimic-iii/tests/test_postgres_build.py
test_itemids_in_labevents_are_in_range
kingpfogel/mimic-code
1,626
python
def test_itemids_in_labevents_are_in_range(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.labevents WHERE itemid < 50000 OR itemid > 60000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)
def test_itemids_in_labevents_are_in_range(mimic_con, mimic_schema): '\n \n ' query = f' SELECT COUNT(*) FROM {mimic_schema}.labevents WHERE itemid < 50000 OR itemid > 60000; ' queryresult = pd.read_sql_query(query, mimic_con) assert (queryresult.values[0][0] == 0)<|docstring|>Number of ITEMIDs which are not in the allowable range<|endoftext|>
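The eight itemid checks above share one shape: count the rows whose itemid falls outside the table's expected band and assert that the count is zero. A hedged sketch of how they could be collapsed with pytest.mark.parametrize; the (table, predicate) pairs restate the WHERE clauses used above, and test_itemids_in_expected_range is an illustrative name, not a function in the repository:

import pandas as pd
import pytest

# (table, predicate) pairs restating the conditions from the tests above.
ITEMID_CHECKS = [
    ('inputevents_cv', 'itemid < 30000'),
    ('inputevents_mv', 'itemid < 220000'),
    ('outputevents', 'itemid < 30000'),
    ('inputevents_cv', 'itemid > 50000'),
    ('outputevents', 'itemid > 50000 AND itemid < 220000'),
    ('chartevents', 'itemid > 20000 AND itemid < 220000'),
    ('procedureevents_mv', 'itemid < 220000'),
    ('labevents', 'itemid < 50000 OR itemid > 60000'),
]

@pytest.mark.parametrize('table,predicate', ITEMID_CHECKS)
def test_itemids_in_expected_range(mimic_con, mimic_schema, table, predicate):
    # Any row matching the predicate is an itemid outside its allowed range.
    query = f'SELECT COUNT(*) FROM {mimic_schema}.{table} WHERE {predicate};'
    assert pd.read_sql_query(query, mimic_con).values[0][0] == 0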