Column                      Type     Values
Unnamed: 0                  int64    0 to 10k
repository_name             string   lengths 7 to 54
func_path_in_repository     string   lengths 5 to 223
func_name                   string   lengths 1 to 134
whole_func_string           string   lengths 100 to 30.3k
language                    string   1 class
func_code_string            string   lengths 100 to 30.3k
func_code_tokens            string   lengths 138 to 33.2k
func_documentation_string   string   lengths 1 to 15k
func_documentation_tokens   string   lengths 5 to 5.14k
split_name                  string   1 class
func_code_url               string   lengths 91 to 315
8,800
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/security/security_client.py
SecurityClient.has_permissions_batch
def has_permissions_batch(self, eval_batch): """HasPermissionsBatch. Evaluates multiple permissions for the calling user. Note: This method does not aggregate the results, nor does it short-circuit if one of the permissions evaluates to false. :param :class:`<PermissionEvaluationBatch> <azure.devops.v5_0.security.models.PermissionEvaluationBatch>` eval_batch: The set of evaluation requests. :rtype: :class:`<PermissionEvaluationBatch> <azure.devops.v5_0.security.models.PermissionEvaluationBatch>` """ content = self._serialize.body(eval_batch, 'PermissionEvaluationBatch') response = self._send(http_method='POST', location_id='cf1faa59-1b63-4448-bf04-13d981a46f5d', version='5.0', content=content) return self._deserialize('PermissionEvaluationBatch', response)
python
def has_permissions_batch(self, eval_batch): """HasPermissionsBatch. Evaluates multiple permissions for the calling user. Note: This method does not aggregate the results, nor does it short-circuit if one of the permissions evaluates to false. :param :class:`<PermissionEvaluationBatch> <azure.devops.v5_0.security.models.PermissionEvaluationBatch>` eval_batch: The set of evaluation requests. :rtype: :class:`<PermissionEvaluationBatch> <azure.devops.v5_0.security.models.PermissionEvaluationBatch>` """ content = self._serialize.body(eval_batch, 'PermissionEvaluationBatch') response = self._send(http_method='POST', location_id='cf1faa59-1b63-4448-bf04-13d981a46f5d', version='5.0', content=content) return self._deserialize('PermissionEvaluationBatch', response)
['def', 'has_permissions_batch', '(', 'self', ',', 'eval_batch', ')', ':', 'content', '=', 'self', '.', '_serialize', '.', 'body', '(', 'eval_batch', ',', "'PermissionEvaluationBatch'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'POST'", ',', 'location_id', '=', "'cf1faa59-1b63-4448-bf04-13d981a46f5d'", ',', 'version', '=', "'5.0'", ',', 'content', '=', 'content', ')', 'return', 'self', '.', '_deserialize', '(', "'PermissionEvaluationBatch'", ',', 'response', ')']
HasPermissionsBatch. Evaluates multiple permissions for the calling user. Note: This method does not aggregate the results, nor does it short-circuit if one of the permissions evaluates to false. :param :class:`<PermissionEvaluationBatch> <azure.devops.v5_0.security.models.PermissionEvaluationBatch>` eval_batch: The set of evaluation requests. :rtype: :class:`<PermissionEvaluationBatch> <azure.devops.v5_0.security.models.PermissionEvaluationBatch>`
['HasPermissionsBatch', '.', 'Evaluates', 'multiple', 'permissions', 'for', 'the', 'calling', 'user', '.', 'Note', ':', 'This', 'method', 'does', 'not', 'aggregate', 'the', 'results', 'nor', 'does', 'it', 'short', '-', 'circuit', 'if', 'one', 'of', 'the', 'permissions', 'evaluates', 'to', 'false', '.', ':', 'param', ':', 'class', ':', '<PermissionEvaluationBatch', '>', '<azure', '.', 'devops', '.', 'v5_0', '.', 'security', '.', 'models', '.', 'PermissionEvaluationBatch', '>', 'eval_batch', ':', 'The', 'set', 'of', 'evaluation', 'requests', '.', ':', 'rtype', ':', ':', 'class', ':', '<PermissionEvaluationBatch', '>', '<azure', '.', 'devops', '.', 'v5_0', '.', 'security', '.', 'models', '.', 'PermissionEvaluationBatch', '>']
train
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/security/security_client.py#L137-L148
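A minimal usage sketch for the method in the row above, assuming an already-constructed `SecurityClient` instance (client construction is not shown). The model names and field names follow the Azure DevOps REST contract and are assumptions here, not taken from this excerpt.

```python
# Hypothetical sketch: build a PermissionEvaluationBatch and evaluate it with an
# existing SecurityClient instance named `security_client`.
from azure.devops.v5_0.security.models import (
    PermissionEvaluation,
    PermissionEvaluationBatch,
)

def check_permissions(security_client, namespace_id, token, bits):
    # One PermissionEvaluation per permission bit to check (field names assumed
    # to mirror the REST contract: securityNamespaceId, token, permissions).
    evaluations = [
        PermissionEvaluation(
            security_namespace_id=namespace_id,
            token=token,
            permissions=bit,
        )
        for bit in bits
    ]
    batch = PermissionEvaluationBatch(evaluations=evaluations)
    # POSTs the batch; each returned evaluation carries its result in `value`.
    result = security_client.has_permissions_batch(batch)
    return [evaluation.value for evaluation in result.evaluations]
```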
8,801
CI-WATER/mapkit
mapkit/ColorRampGenerator.py
MappedColorRamp.getColorMapAsContinuousSLD
def getColorMapAsContinuousSLD(self, nodata=-9999): """ Return the mapped color ramp as a :rtype: str """ colorMap = ET.Element('ColorMap', type='interval') # Add a line for the no-data values (nv) ET.SubElement(colorMap, 'ColorMapEntry', color='#000000', quantity=str(nodata), label='NoData', opacity='0.0') def get_label_formatter(value): label_tag="{label:.0f}" if abs(value) < 0.01 and value != 0: label_tag = "{label:.2E}" elif abs(value) < 10: label_tag="{label:.2f}" elif abs(value) < 99: label_tag="{label:.1f}" return label_tag if self.min != self.max and self.slope > 0: for rampIndex in range(len(self.colorRamp)): valueForIndex = (rampIndex - self.intercept) / self.slope red, green, blue = self.colorRamp[rampIndex] hexRGB = '#%02X%02X%02X' % (red, green, blue) label_tag = get_label_formatter(valueForIndex) ET.SubElement(colorMap, 'ColorMapEntry', color=hexRGB, quantity=str(valueForIndex), label=label_tag.format(label=valueForIndex), opacity=str(self.alpha)) else: valueForIndex = self.max red, green, blue = self.colorRamp[0] hexRGB = '#%02X%02X%02X' % (red, green, blue) label_tag = get_label_formatter(valueForIndex) ET.SubElement(colorMap, 'ColorMapEntry', color=hexRGB, quantity=str(valueForIndex), label=label_tag.format(label=valueForIndex), opacity=str(self.alpha)) return ET.tostring(colorMap)
python
def getColorMapAsContinuousSLD(self, nodata=-9999): """ Return the mapped color ramp as a :rtype: str """ colorMap = ET.Element('ColorMap', type='interval') # Add a line for the no-data values (nv) ET.SubElement(colorMap, 'ColorMapEntry', color='#000000', quantity=str(nodata), label='NoData', opacity='0.0') def get_label_formatter(value): label_tag="{label:.0f}" if abs(value) < 0.01 and value != 0: label_tag = "{label:.2E}" elif abs(value) < 10: label_tag="{label:.2f}" elif abs(value) < 99: label_tag="{label:.1f}" return label_tag if self.min != self.max and self.slope > 0: for rampIndex in range(len(self.colorRamp)): valueForIndex = (rampIndex - self.intercept) / self.slope red, green, blue = self.colorRamp[rampIndex] hexRGB = '#%02X%02X%02X' % (red, green, blue) label_tag = get_label_formatter(valueForIndex) ET.SubElement(colorMap, 'ColorMapEntry', color=hexRGB, quantity=str(valueForIndex), label=label_tag.format(label=valueForIndex), opacity=str(self.alpha)) else: valueForIndex = self.max red, green, blue = self.colorRamp[0] hexRGB = '#%02X%02X%02X' % (red, green, blue) label_tag = get_label_formatter(valueForIndex) ET.SubElement(colorMap, 'ColorMapEntry', color=hexRGB, quantity=str(valueForIndex), label=label_tag.format(label=valueForIndex), opacity=str(self.alpha)) return ET.tostring(colorMap)
['def', 'getColorMapAsContinuousSLD', '(', 'self', ',', 'nodata', '=', '-', '9999', ')', ':', 'colorMap', '=', 'ET', '.', 'Element', '(', "'ColorMap'", ',', 'type', '=', "'interval'", ')', '# Add a line for the no-data values (nv)', 'ET', '.', 'SubElement', '(', 'colorMap', ',', "'ColorMapEntry'", ',', 'color', '=', "'#000000'", ',', 'quantity', '=', 'str', '(', 'nodata', ')', ',', 'label', '=', "'NoData'", ',', 'opacity', '=', "'0.0'", ')', 'def', 'get_label_formatter', '(', 'value', ')', ':', 'label_tag', '=', '"{label:.0f}"', 'if', 'abs', '(', 'value', ')', '<', '0.01', 'and', 'value', '!=', '0', ':', 'label_tag', '=', '"{label:.2E}"', 'elif', 'abs', '(', 'value', ')', '<', '10', ':', 'label_tag', '=', '"{label:.2f}"', 'elif', 'abs', '(', 'value', ')', '<', '99', ':', 'label_tag', '=', '"{label:.1f}"', 'return', 'label_tag', 'if', 'self', '.', 'min', '!=', 'self', '.', 'max', 'and', 'self', '.', 'slope', '>', '0', ':', 'for', 'rampIndex', 'in', 'range', '(', 'len', '(', 'self', '.', 'colorRamp', ')', ')', ':', 'valueForIndex', '=', '(', 'rampIndex', '-', 'self', '.', 'intercept', ')', '/', 'self', '.', 'slope', 'red', ',', 'green', ',', 'blue', '=', 'self', '.', 'colorRamp', '[', 'rampIndex', ']', 'hexRGB', '=', "'#%02X%02X%02X'", '%', '(', 'red', ',', 'green', ',', 'blue', ')', 'label_tag', '=', 'get_label_formatter', '(', 'valueForIndex', ')', 'ET', '.', 'SubElement', '(', 'colorMap', ',', "'ColorMapEntry'", ',', 'color', '=', 'hexRGB', ',', 'quantity', '=', 'str', '(', 'valueForIndex', ')', ',', 'label', '=', 'label_tag', '.', 'format', '(', 'label', '=', 'valueForIndex', ')', ',', 'opacity', '=', 'str', '(', 'self', '.', 'alpha', ')', ')', 'else', ':', 'valueForIndex', '=', 'self', '.', 'max', 'red', ',', 'green', ',', 'blue', '=', 'self', '.', 'colorRamp', '[', '0', ']', 'hexRGB', '=', "'#%02X%02X%02X'", '%', '(', 'red', ',', 'green', ',', 'blue', ')', 'label_tag', '=', 'get_label_formatter', '(', 'valueForIndex', ')', 'ET', '.', 'SubElement', '(', 'colorMap', ',', "'ColorMapEntry'", ',', 'color', '=', 'hexRGB', ',', 'quantity', '=', 'str', '(', 'valueForIndex', ')', ',', 'label', '=', 'label_tag', '.', 'format', '(', 'label', '=', 'valueForIndex', ')', ',', 'opacity', '=', 'str', '(', 'self', '.', 'alpha', ')', ')', 'return', 'ET', '.', 'tostring', '(', 'colorMap', ')']
Return the mapped color ramp as a :rtype: str
['Return', 'the', 'mapped', 'color', 'ramp', 'as', 'a', ':', 'rtype', ':', 'str']
train
https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/ColorRampGenerator.py#L115-L161
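The row above builds a GeoServer-style SLD `<ColorMap>` with `xml.etree.ElementTree`. A stripped-down, self-contained sketch of the same pattern; the ramp colors and values here are made up for illustration.

```python
import xml.etree.ElementTree as ET

def color_ramp_to_sld(ramp, values, nodata=-9999, alpha=1.0):
    """Build an SLD <ColorMap> from parallel lists of RGB tuples and values."""
    color_map = ET.Element('ColorMap', type='interval')
    # Transparent entry for no-data cells.
    ET.SubElement(color_map, 'ColorMapEntry', color='#000000',
                  quantity=str(nodata), label='NoData', opacity='0.0')
    for (red, green, blue), value in zip(ramp, values):
        hex_rgb = '#%02X%02X%02X' % (red, green, blue)
        ET.SubElement(color_map, 'ColorMapEntry', color=hex_rgb,
                      quantity=str(value), label='{:.2f}'.format(value),
                      opacity=str(alpha))
    return ET.tostring(color_map)

print(color_ramp_to_sld([(0, 0, 255), (255, 0, 0)], [0.0, 10.0]))
```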
8,802
radjkarl/imgProcessor
imgProcessor/camera/LensDistortion.py
LensDistortion.getCameraParams
def getCameraParams(self): ''' value positions based on http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#cv.InitUndistortRectifyMap ''' c = self.coeffs['cameraMatrix'] fx = c[0][0] fy = c[1][1] cx = c[0][2] cy = c[1][2] k1, k2, p1, p2, k3 = tuple(self.coeffs['distortionCoeffs'].tolist()[0]) return fx, fy, cx, cy, k1, k2, k3, p1, p2
python
def getCameraParams(self): ''' value positions based on http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#cv.InitUndistortRectifyMap ''' c = self.coeffs['cameraMatrix'] fx = c[0][0] fy = c[1][1] cx = c[0][2] cy = c[1][2] k1, k2, p1, p2, k3 = tuple(self.coeffs['distortionCoeffs'].tolist()[0]) return fx, fy, cx, cy, k1, k2, k3, p1, p2
['def', 'getCameraParams', '(', 'self', ')', ':', 'c', '=', 'self', '.', 'coeffs', '[', "'cameraMatrix'", ']', 'fx', '=', 'c', '[', '0', ']', '[', '0', ']', 'fy', '=', 'c', '[', '1', ']', '[', '1', ']', 'cx', '=', 'c', '[', '0', ']', '[', '2', ']', 'cy', '=', 'c', '[', '1', ']', '[', '2', ']', 'k1', ',', 'k2', ',', 'p1', ',', 'p2', ',', 'k3', '=', 'tuple', '(', 'self', '.', 'coeffs', '[', "'distortionCoeffs'", ']', '.', 'tolist', '(', ')', '[', '0', ']', ')', 'return', 'fx', ',', 'fy', ',', 'cx', ',', 'cy', ',', 'k1', ',', 'k2', ',', 'k3', ',', 'p1', ',', 'p2']
value positions based on http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html#cv.InitUndistortRectifyMap
['value', 'positions', 'based', 'on', 'http', ':', '//', 'docs', '.', 'opencv', '.', 'org', '/', 'modules', '/', 'imgproc', '/', 'doc', '/', 'geometric_transformations', '.', 'html#cv', '.', 'InitUndistortRectifyMap']
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/LensDistortion.py#L360-L371
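The OpenCV camera-matrix layout the row above relies on (fx, fy on the diagonal; cx, cy in the last column; distortion order k1, k2, p1, p2, k3) can be illustrated with plain nested lists. The numbers below are made up.

```python
# Illustrative 3x3 camera matrix and distortion coefficients (values invented).
camera_matrix = [
    [1000.0,    0.0, 640.0],
    [   0.0, 1005.0, 360.0],
    [   0.0,    0.0,   1.0],
]
distortion_coeffs = [[-0.10, 0.02, 0.001, 0.0005, -0.004]]  # k1, k2, p1, p2, k3

fx, fy = camera_matrix[0][0], camera_matrix[1][1]
cx, cy = camera_matrix[0][2], camera_matrix[1][2]
k1, k2, p1, p2, k3 = distortion_coeffs[0]
print(fx, fy, cx, cy, k1, k2, k3, p1, p2)
```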
8,803
emdb-empiar/ahds
ahds/data_stream.py
ImageSet.segments
def segments(self): """A dictionary of lists of contours keyed by z-index""" segments = dict() for i in xrange(len(self)): image = self[i] for z, contour in image.as_segments.iteritems(): for byte_value, contour_set in contour.iteritems(): if byte_value not in segments: segments[byte_value] = dict() if z not in segments[byte_value]: segments[byte_value][z] = contour_set else: segments[byte_value][z] += contour_set return segments
python
def segments(self): """A dictionary of lists of contours keyed by z-index""" segments = dict() for i in xrange(len(self)): image = self[i] for z, contour in image.as_segments.iteritems(): for byte_value, contour_set in contour.iteritems(): if byte_value not in segments: segments[byte_value] = dict() if z not in segments[byte_value]: segments[byte_value][z] = contour_set else: segments[byte_value][z] += contour_set return segments
['def', 'segments', '(', 'self', ')', ':', 'segments', '=', 'dict', '(', ')', 'for', 'i', 'in', 'xrange', '(', 'len', '(', 'self', ')', ')', ':', 'image', '=', 'self', '[', 'i', ']', 'for', 'z', ',', 'contour', 'in', 'image', '.', 'as_segments', '.', 'iteritems', '(', ')', ':', 'for', 'byte_value', ',', 'contour_set', 'in', 'contour', '.', 'iteritems', '(', ')', ':', 'if', 'byte_value', 'not', 'in', 'segments', ':', 'segments', '[', 'byte_value', ']', '=', 'dict', '(', ')', 'if', 'z', 'not', 'in', 'segments', '[', 'byte_value', ']', ':', 'segments', '[', 'byte_value', ']', '[', 'z', ']', '=', 'contour_set', 'else', ':', 'segments', '[', 'byte_value', ']', '[', 'z', ']', '+=', 'contour_set', 'return', 'segments']
A dictionary of lists of contours keyed by z-index
['A', 'dictionary', 'of', 'lists', 'of', 'contours', 'keyed', 'by', 'z', '-', 'index']
train
https://github.com/emdb-empiar/ahds/blob/6a752f6806d4f62155cd2e1194de8aabe7195e0f/ahds/data_stream.py#L242-L256
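The function in the row above is Python 2 code (`xrange`, `iteritems`). A self-contained Python 3 sketch of the same nested-dict accumulation pattern, with a made-up `as_segments` structure standing in for the real image objects.

```python
def collect_segments(images):
    """Group contour sets by byte value and z-index across a sequence of images.

    Each image is assumed to expose 'as_segments' as {z: {byte_value: contour_set}}.
    """
    segments = {}
    for image in images:
        for z, contours in image['as_segments'].items():
            for byte_value, contour_set in contours.items():
                segments.setdefault(byte_value, {})
                if z not in segments[byte_value]:
                    segments[byte_value][z] = list(contour_set)
                else:
                    segments[byte_value][z] += contour_set
    return segments

images = [{'as_segments': {0: {1: ['c1'], 2: ['c2']}}},
          {'as_segments': {0: {1: ['c3']}}}]
print(collect_segments(images))  # {1: {0: ['c1', 'c3']}, 2: {0: ['c2']}}
```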
8,804
awslabs/aws-cfn-template-flip
cfn_flip/__init__.py
dump_yaml
def dump_yaml(data, clean_up=False, long_form=False): """ Output some YAML """ return yaml.dump( data, Dumper=get_dumper(clean_up, long_form), default_flow_style=False, allow_unicode=True )
python
def dump_yaml(data, clean_up=False, long_form=False): """ Output some YAML """ return yaml.dump( data, Dumper=get_dumper(clean_up, long_form), default_flow_style=False, allow_unicode=True )
['def', 'dump_yaml', '(', 'data', ',', 'clean_up', '=', 'False', ',', 'long_form', '=', 'False', ')', ':', 'return', 'yaml', '.', 'dump', '(', 'data', ',', 'Dumper', '=', 'get_dumper', '(', 'clean_up', ',', 'long_form', ')', ',', 'default_flow_style', '=', 'False', ',', 'allow_unicode', '=', 'True', ')']
Output some YAML
['Output', 'some', 'YAML']
train
https://github.com/awslabs/aws-cfn-template-flip/blob/837576bea243e3f5efb0a20b84802371272e2d33/cfn_flip/__init__.py#L37-L47
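The row above wraps PyYAML's `yaml.dump` with a custom Dumper from cfn_flip. With the custom dumper omitted, the flags it passes behave like this plain-PyYAML call (the template data is invented for illustration):

```python
import yaml

data = {'Description': 'démo stack',
        'Resources': {'Bucket': {'Type': 'AWS::S3::Bucket'}}}

# default_flow_style=False forces block style; allow_unicode=True keeps
# non-ASCII characters instead of escaping them.
print(yaml.dump(data, default_flow_style=False, allow_unicode=True))
```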
8,805
raiden-network/raiden
raiden/transfer/mediated_transfer/mediator.py
sanity_check
def sanity_check( state: MediatorTransferState, channelidentifiers_to_channels: ChannelMap, ) -> None: """ Check invariants that must hold. """ # if a transfer is paid we must know the secret all_transfers_states = itertools.chain( (pair.payee_state for pair in state.transfers_pair), (pair.payer_state for pair in state.transfers_pair), ) if any(state in STATE_TRANSFER_PAID for state in all_transfers_states): assert state.secret is not None # the "transitivity" for these values is checked below as part of # almost_equal check if state.transfers_pair: first_pair = state.transfers_pair[0] assert state.secrethash == first_pair.payer_transfer.lock.secrethash for pair in state.transfers_pair: payee_channel = get_payee_channel( channelidentifiers_to_channels=channelidentifiers_to_channels, transfer_pair=pair, ) # Channel could have been removed if not payee_channel: continue assert is_send_transfer_almost_equal( send_channel=payee_channel, send=pair.payee_transfer, received=pair.payer_transfer, ) assert pair.payer_state in pair.valid_payer_states assert pair.payee_state in pair.valid_payee_states for original, refund in zip(state.transfers_pair[:-1], state.transfers_pair[1:]): assert original.payee_address == refund.payer_address payer_channel = get_payer_channel( channelidentifiers_to_channels=channelidentifiers_to_channels, transfer_pair=refund, ) # Channel could have been removed if not payer_channel: continue transfer_sent = original.payee_transfer transfer_received = refund.payer_transfer assert is_send_transfer_almost_equal( send_channel=payer_channel, send=transfer_sent, received=transfer_received, ) if state.waiting_transfer and state.transfers_pair: last_transfer_pair = state.transfers_pair[-1] payee_channel = get_payee_channel( channelidentifiers_to_channels=channelidentifiers_to_channels, transfer_pair=last_transfer_pair, ) # Channel could have been removed if payee_channel: transfer_sent = last_transfer_pair.payee_transfer transfer_received = state.waiting_transfer.transfer assert is_send_transfer_almost_equal( send_channel=payee_channel, send=transfer_sent, received=transfer_received, )
python
def sanity_check( state: MediatorTransferState, channelidentifiers_to_channels: ChannelMap, ) -> None: """ Check invariants that must hold. """ # if a transfer is paid we must know the secret all_transfers_states = itertools.chain( (pair.payee_state for pair in state.transfers_pair), (pair.payer_state for pair in state.transfers_pair), ) if any(state in STATE_TRANSFER_PAID for state in all_transfers_states): assert state.secret is not None # the "transitivity" for these values is checked below as part of # almost_equal check if state.transfers_pair: first_pair = state.transfers_pair[0] assert state.secrethash == first_pair.payer_transfer.lock.secrethash for pair in state.transfers_pair: payee_channel = get_payee_channel( channelidentifiers_to_channels=channelidentifiers_to_channels, transfer_pair=pair, ) # Channel could have been removed if not payee_channel: continue assert is_send_transfer_almost_equal( send_channel=payee_channel, send=pair.payee_transfer, received=pair.payer_transfer, ) assert pair.payer_state in pair.valid_payer_states assert pair.payee_state in pair.valid_payee_states for original, refund in zip(state.transfers_pair[:-1], state.transfers_pair[1:]): assert original.payee_address == refund.payer_address payer_channel = get_payer_channel( channelidentifiers_to_channels=channelidentifiers_to_channels, transfer_pair=refund, ) # Channel could have been removed if not payer_channel: continue transfer_sent = original.payee_transfer transfer_received = refund.payer_transfer assert is_send_transfer_almost_equal( send_channel=payer_channel, send=transfer_sent, received=transfer_received, ) if state.waiting_transfer and state.transfers_pair: last_transfer_pair = state.transfers_pair[-1] payee_channel = get_payee_channel( channelidentifiers_to_channels=channelidentifiers_to_channels, transfer_pair=last_transfer_pair, ) # Channel could have been removed if payee_channel: transfer_sent = last_transfer_pair.payee_transfer transfer_received = state.waiting_transfer.transfer assert is_send_transfer_almost_equal( send_channel=payee_channel, send=transfer_sent, received=transfer_received, )
['def', 'sanity_check', '(', 'state', ':', 'MediatorTransferState', ',', 'channelidentifiers_to_channels', ':', 'ChannelMap', ',', ')', '->', 'None', ':', '# if a transfer is paid we must know the secret', 'all_transfers_states', '=', 'itertools', '.', 'chain', '(', '(', 'pair', '.', 'payee_state', 'for', 'pair', 'in', 'state', '.', 'transfers_pair', ')', ',', '(', 'pair', '.', 'payer_state', 'for', 'pair', 'in', 'state', '.', 'transfers_pair', ')', ',', ')', 'if', 'any', '(', 'state', 'in', 'STATE_TRANSFER_PAID', 'for', 'state', 'in', 'all_transfers_states', ')', ':', 'assert', 'state', '.', 'secret', 'is', 'not', 'None', '# the "transitivity" for these values is checked below as part of', '# almost_equal check', 'if', 'state', '.', 'transfers_pair', ':', 'first_pair', '=', 'state', '.', 'transfers_pair', '[', '0', ']', 'assert', 'state', '.', 'secrethash', '==', 'first_pair', '.', 'payer_transfer', '.', 'lock', '.', 'secrethash', 'for', 'pair', 'in', 'state', '.', 'transfers_pair', ':', 'payee_channel', '=', 'get_payee_channel', '(', 'channelidentifiers_to_channels', '=', 'channelidentifiers_to_channels', ',', 'transfer_pair', '=', 'pair', ',', ')', '# Channel could have been removed', 'if', 'not', 'payee_channel', ':', 'continue', 'assert', 'is_send_transfer_almost_equal', '(', 'send_channel', '=', 'payee_channel', ',', 'send', '=', 'pair', '.', 'payee_transfer', ',', 'received', '=', 'pair', '.', 'payer_transfer', ',', ')', 'assert', 'pair', '.', 'payer_state', 'in', 'pair', '.', 'valid_payer_states', 'assert', 'pair', '.', 'payee_state', 'in', 'pair', '.', 'valid_payee_states', 'for', 'original', ',', 'refund', 'in', 'zip', '(', 'state', '.', 'transfers_pair', '[', ':', '-', '1', ']', ',', 'state', '.', 'transfers_pair', '[', '1', ':', ']', ')', ':', 'assert', 'original', '.', 'payee_address', '==', 'refund', '.', 'payer_address', 'payer_channel', '=', 'get_payer_channel', '(', 'channelidentifiers_to_channels', '=', 'channelidentifiers_to_channels', ',', 'transfer_pair', '=', 'refund', ',', ')', '# Channel could have been removed', 'if', 'not', 'payer_channel', ':', 'continue', 'transfer_sent', '=', 'original', '.', 'payee_transfer', 'transfer_received', '=', 'refund', '.', 'payer_transfer', 'assert', 'is_send_transfer_almost_equal', '(', 'send_channel', '=', 'payer_channel', ',', 'send', '=', 'transfer_sent', ',', 'received', '=', 'transfer_received', ',', ')', 'if', 'state', '.', 'waiting_transfer', 'and', 'state', '.', 'transfers_pair', ':', 'last_transfer_pair', '=', 'state', '.', 'transfers_pair', '[', '-', '1', ']', 'payee_channel', '=', 'get_payee_channel', '(', 'channelidentifiers_to_channels', '=', 'channelidentifiers_to_channels', ',', 'transfer_pair', '=', 'last_transfer_pair', ',', ')', '# Channel could have been removed', 'if', 'payee_channel', ':', 'transfer_sent', '=', 'last_transfer_pair', '.', 'payee_transfer', 'transfer_received', '=', 'state', '.', 'waiting_transfer', '.', 'transfer', 'assert', 'is_send_transfer_almost_equal', '(', 'send_channel', '=', 'payee_channel', ',', 'send', '=', 'transfer_sent', ',', 'received', '=', 'transfer_received', ',', ')']
Check invariants that must hold.
['Check', 'invariants', 'that', 'must', 'hold', '.']
train
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/transfer/mediated_transfer/mediator.py#L293-L365
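The first invariant in the row above chains two generators before testing membership against a set of "paid" states. A tiny self-contained sketch of that idiom; the state names and the paid set are made up.

```python
import itertools

pairs = [{'payee_state': 'payee_paid', 'payer_state': 'payer_pending'},
         {'payee_state': 'payee_pending', 'payer_state': 'payer_pending'}]
STATE_TRANSFER_PAID = {'payee_paid', 'payer_paid'}
secret = 'known-secret'

all_states = itertools.chain(
    (pair['payee_state'] for pair in pairs),
    (pair['payer_state'] for pair in pairs),
)
# If any transfer is already paid, the secret must be known.
if any(state in STATE_TRANSFER_PAID for state in all_states):
    assert secret is not None
```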
8,806
lawsie/guizero
guizero/ButtonGroup.py
ButtonGroup.append
def append(self, option): """ Appends a new `option` to the end of the ButtonGroup. :param string/List option: The option to append to the ButtonGroup. If a 2D list is specified, the first element is the text, the second is the value. """ self._options.append(self._parse_option(option)) self._refresh_options() self.resize(self._width, self._height)
python
def append(self, option): """ Appends a new `option` to the end of the ButtonGroup. :param string/List option: The option to append to the ButtonGroup. If a 2D list is specified, the first element is the text, the second is the value. """ self._options.append(self._parse_option(option)) self._refresh_options() self.resize(self._width, self._height)
['def', 'append', '(', 'self', ',', 'option', ')', ':', 'self', '.', '_options', '.', 'append', '(', 'self', '.', '_parse_option', '(', 'option', ')', ')', 'self', '.', '_refresh_options', '(', ')', 'self', '.', 'resize', '(', 'self', '.', '_width', ',', 'self', '.', '_height', ')']
Appends a new `option` to the end of the ButtonGroup. :param string/List option: The option to append to the ButtonGroup. If a 2D list is specified, the first element is the text, the second is the value.
['Appends', 'a', 'new', 'option', 'to', 'the', 'end', 'of', 'the', 'ButtonGroup', '.']
train
https://github.com/lawsie/guizero/blob/84c7f0b314fa86f9fc88eb11c9a0f6c4b57155e2/guizero/ButtonGroup.py#L235-L245
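A short usage sketch for the method above, assuming a standard guizero setup; the window title and option values are illustrative only.

```python
from guizero import App, ButtonGroup

app = App(title="append demo")
choices = ButtonGroup(app, options=["cheese", "ham"], selected="cheese")

# Append a plain string option, then a [text, value] pair as the docstring allows.
choices.append("mushroom")
choices.append(["pineapple", "p1"])

app.display()
```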
8,807
llazzaro/analyzerdam
analyzerdam/sqlDAM.py
SqlDAM.write_fundamental
def write_fundamental(self, keyTimeValueDict): ''' write fundamental ''' if self.first: Base.metadata.create_all(self.__getEngine(), checkfirst=True) self.first=False sqls=self._fundamentalToSqls(keyTimeValueDict) session=self.Session() try: session.add_all(sqls) finally: self.Session.remove()
python
def write_fundamental(self, keyTimeValueDict): ''' write fundamental ''' if self.first: Base.metadata.create_all(self.__getEngine(), checkfirst=True) self.first=False sqls=self._fundamentalToSqls(keyTimeValueDict) session=self.Session() try: session.add_all(sqls) finally: self.Session.remove()
['def', 'write_fundamental', '(', 'self', ',', 'keyTimeValueDict', ')', ':', 'if', 'self', '.', 'first', ':', 'Base', '.', 'metadata', '.', 'create_all', '(', 'self', '.', '__getEngine', '(', ')', ',', 'checkfirst', '=', 'True', ')', 'self', '.', 'first', '=', 'False', 'sqls', '=', 'self', '.', '_fundamentalToSqls', '(', 'keyTimeValueDict', ')', 'session', '=', 'self', '.', 'Session', '(', ')', 'try', ':', 'session', '.', 'add_all', '(', 'sqls', ')', 'finally', ':', 'self', '.', 'Session', '.', 'remove', '(', ')']
write fundamental
['write', 'fundamental']
train
https://github.com/llazzaro/analyzerdam/blob/c5bc7483dae23bd2e14bbf36147b7a43a0067bc0/analyzerdam/sqlDAM.py#L181-L192
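The row above follows a common SQLAlchemy pattern: lazily create tables on first write, then add objects through a scoped session and release it. A self-contained sketch of that pattern against an in-memory SQLite engine; the model and values are made up, and it assumes SQLAlchemy 1.4+ for the `sqlalchemy.orm.declarative_base` import. Unlike the original, the sketch commits explicitly so the rows are actually persisted.

```python
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, scoped_session, sessionmaker

Base = declarative_base()

class Fundamental(Base):
    __tablename__ = 'fundamental'
    id = Column(Integer, primary_key=True)
    key = Column(String)
    value = Column(String)

engine = create_engine('sqlite:///:memory:')
Session = scoped_session(sessionmaker(bind=engine))

# checkfirst=True only issues CREATE TABLE for tables that do not exist yet.
Base.metadata.create_all(engine, checkfirst=True)

session = Session()
try:
    session.add_all([Fundamental(key='eps', value='1.23')])
    session.commit()
finally:
    Session.remove()
```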
8,808
happyleavesaoc/python-voobly
utils/update_metadata.py
get_ladder_metadata
def get_ladder_metadata(session, url): """Get ladder metadata.""" parsed = make_scrape_request(session, url) tag = parsed.find('a', href=re.compile(LADDER_ID_REGEX)) return { 'id': int(tag['href'].split('/')[-1]), 'slug': url.split('/')[-1], 'url': url }
python
def get_ladder_metadata(session, url): """Get ladder metadata.""" parsed = make_scrape_request(session, url) tag = parsed.find('a', href=re.compile(LADDER_ID_REGEX)) return { 'id': int(tag['href'].split('/')[-1]), 'slug': url.split('/')[-1], 'url': url }
['def', 'get_ladder_metadata', '(', 'session', ',', 'url', ')', ':', 'parsed', '=', 'make_scrape_request', '(', 'session', ',', 'url', ')', 'tag', '=', 'parsed', '.', 'find', '(', "'a'", ',', 'href', '=', 're', '.', 'compile', '(', 'LADDER_ID_REGEX', ')', ')', 'return', '{', "'id'", ':', 'int', '(', 'tag', '[', "'href'", ']', '.', 'split', '(', "'/'", ')', '[', '-', '1', ']', ')', ',', "'slug'", ':', 'url', '.', 'split', '(', "'/'", ')', '[', '-', '1', ']', ',', "'url'", ':', 'url', '}']
Get ladder metadata.
['Get', 'ladder', 'metadata', '.']
train
https://github.com/happyleavesaoc/python-voobly/blob/83b4ab7d630a00459c2a64e55e3ac85c7be38194/utils/update_metadata.py#L14-L22
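The scraping trick in the row above (finding an anchor whose href matches a regex, then parsing the trailing id) works the same on any BeautifulSoup document. Here it runs on a small inline HTML string; the regex is defined locally for illustration and may differ from the repo's real `LADDER_ID_REGEX`.

```python
import re
from bs4 import BeautifulSoup

LADDER_ID_REGEX = r'/ladder/\d+$'  # illustrative, not the repo's constant
html = '<a href="/ladder/131">RM 1v1</a> <a href="/other">x</a>'

parsed = BeautifulSoup(html, 'html.parser')
tag = parsed.find('a', href=re.compile(LADDER_ID_REGEX))
ladder_id = int(tag['href'].split('/')[-1])
print(ladder_id)  # 131
```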
8,809
fbcotter/py3nvml
py3nvml/py3nvml.py
nvmlDeviceGetPcieThroughput
def nvmlDeviceGetPcieThroughput(device, counter): r""" /** * Retrieve PCIe utilization information. * This function is querying a byte counter over a 20ms interval and thus is the * PCIe throughput over that interval. * * For Maxwell &tm; or newer fully supported devices. * * This method is not supported in virtual machines running virtual GPU (vGPU). * * @param device The identifier of the target device * @param counter The specific counter that should be queried \ref nvmlPcieUtilCounter_t * @param value Reference in which to return throughput in KB/s * * @return * - \ref NVML_SUCCESS if \a value has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a counter is invalid, or \a value is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetPcieThroughput """ c_util = c_uint() fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieThroughput") ret = fn(device, _nvmlPcieUtilCounter_t(counter), byref(c_util)) _nvmlCheckReturn(ret) return bytes_to_str(c_util.value)
python
def nvmlDeviceGetPcieThroughput(device, counter): r""" /** * Retrieve PCIe utilization information. * This function is querying a byte counter over a 20ms interval and thus is the * PCIe throughput over that interval. * * For Maxwell &tm; or newer fully supported devices. * * This method is not supported in virtual machines running virtual GPU (vGPU). * * @param device The identifier of the target device * @param counter The specific counter that should be queried \ref nvmlPcieUtilCounter_t * @param value Reference in which to return throughput in KB/s * * @return * - \ref NVML_SUCCESS if \a value has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a counter is invalid, or \a value is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetPcieThroughput """ c_util = c_uint() fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieThroughput") ret = fn(device, _nvmlPcieUtilCounter_t(counter), byref(c_util)) _nvmlCheckReturn(ret) return bytes_to_str(c_util.value)
['def', 'nvmlDeviceGetPcieThroughput', '(', 'device', ',', 'counter', ')', ':', 'c_util', '=', 'c_uint', '(', ')', 'fn', '=', '_nvmlGetFunctionPointer', '(', '"nvmlDeviceGetPcieThroughput"', ')', 'ret', '=', 'fn', '(', 'device', ',', '_nvmlPcieUtilCounter_t', '(', 'counter', ')', ',', 'byref', '(', 'c_util', ')', ')', '_nvmlCheckReturn', '(', 'ret', ')', 'return', 'bytes_to_str', '(', 'c_util', '.', 'value', ')']
r""" /** * Retrieve PCIe utilization information. * This function is querying a byte counter over a 20ms interval and thus is the * PCIe throughput over that interval. * * For Maxwell &tm; or newer fully supported devices. * * This method is not supported in virtual machines running virtual GPU (vGPU). * * @param device The identifier of the target device * @param counter The specific counter that should be queried \ref nvmlPcieUtilCounter_t * @param value Reference in which to return throughput in KB/s * * @return * - \ref NVML_SUCCESS if \a value has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a counter is invalid, or \a value is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetPcieThroughput
['r', '/', '**', '*', 'Retrieve', 'PCIe', 'utilization', 'information', '.', '*', 'This', 'function', 'is', 'querying', 'a', 'byte', 'counter', 'over', 'a', '20ms', 'interval', 'and', 'thus', 'is', 'the', '*', 'PCIe', 'throughput', 'over', 'that', 'interval', '.', '*', '*', 'For', 'Maxwell', '&tm', ';', 'or', 'newer', 'fully', 'supported', 'devices', '.', '*', '*', 'This', 'method', 'is', 'not', 'supported', 'in', 'virtual', 'machines', 'running', 'virtual', 'GPU', '(', 'vGPU', ')', '.', '*', '*']
train
https://github.com/fbcotter/py3nvml/blob/47f0f2c0eee56dec4e4beebec26b734e01d357b7/py3nvml/py3nvml.py#L5263-L5292
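A sketch of how the wrapper above is typically called, assuming an NVIDIA GPU is present and the usual NVML init/shutdown bracketing. The handle and counter-constant names mirror NVML/pynvml conventions and are assumptions as far as this excerpt goes.

```python
from py3nvml import py3nvml as nvml

nvml.nvmlInit()
try:
    handle = nvml.nvmlDeviceGetHandleByIndex(0)
    # Throughput is sampled over a ~20 ms window and reported in KB/s.
    tx = nvml.nvmlDeviceGetPcieThroughput(handle, nvml.NVML_PCIE_UTIL_TX_BYTES)
    rx = nvml.nvmlDeviceGetPcieThroughput(handle, nvml.NVML_PCIE_UTIL_RX_BYTES)
    print('PCIe TX %s KB/s, RX %s KB/s' % (tx, rx))
finally:
    nvml.nvmlShutdown()
```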
8,810
Cognexa/cxflow
cxflow/cli/common.py
run
def run(config: dict, output_root: str, restore_from: str=None, eval: Optional[str]=None) -> None: """ Run **cxflow** training configured by the passed `config`. Unique ``output_dir`` for this training is created under the given ``output_root`` dir wherein all the training outputs are saved. The output dir name will be roughly ``[model.name]_[time]``. The training procedure consists of the following steps: 1. Set up (create output dir and file logger, dump the loaded config into the output dir) 2. Create dataset (YAML string with ``dataset`` and ``log_dir`` configs are passed to the dataset constructor) 3. Create (or restore) model (dataset, ``log_dir`` and model config is passed to the constructor) 4. Create all the training hooks 5. Create the ``MainLoop`` object 6. Run the main loop If any of the steps fails, the training is terminated. After the training procedure finishes, the output dir will contain the following: - ``train_log.txt`` with entry point and main loop logs (same as the stderr) - dumped YAML config Additional outputs created by hooks, dataset or tensorflow may include: - ``dataset_log.txt`` with info about dataset/stream creation - model checkpoint(s) - TensorBoard log file - TensorFlow event log :param config: configuration :param output_root: dir under which output_dir shall be created :param restore_from: from whence the model should be restored (backend-specific information) :param eval: optional name of the stream to be evaluated """ output_dir = dataset = model = hooks = main_loop = None try: output_dir = create_output_dir(config=config, output_root=output_root) except Exception as ex: # pylint: disable=broad-except fallback('Failed to create output dir', ex) try: dataset = create_dataset(config=config, output_dir=output_dir) except Exception as ex: # pylint: disable=broad-except fallback('Creating dataset failed', ex) try: model = create_model(config=config, output_dir=output_dir, dataset=dataset, restore_from=restore_from) except Exception as ex: # pylint: disable=broad-except fallback('Creating model failed', ex) try: # save the config to file # modify the config so that it contains fallback information config['model']['restore_fallback'] = model.restore_fallback yaml_to_file(data=config, output_dir=output_dir, name=CXF_CONFIG_FILE) except Exception as ex: # pylint: disable=broad-except fallback('Saving config failed', ex) try: hooks = create_hooks(config=config, model=model, dataset=dataset, output_dir=output_dir) except Exception as ex: # pylint: disable=broad-except fallback('Creating hooks failed', ex) try: logging.info('Creating main loop') kwargs = config['main_loop'] if 'main_loop' in config else {} if eval is not None: kwargs['extra_streams'] = [] main_loop = MainLoop(model=model, dataset=dataset, hooks=hooks, **kwargs) except Exception as ex: # pylint: disable=broad-except fallback('Creating main loop failed', ex) if eval is not None: try: with main_loop: logging.info('Running the evaluation of stream `%s`', eval) main_loop.run_evaluation(eval) except Exception as ex: # pylint: disable=broad-except fallback('Running the evaluation failed', ex) else: trace = TrainingTrace(output_dir) try: with main_loop: logging.info('Running the training') trace[TrainingTraceKeys.TRAIN_BEGIN] = datetime.now() main_loop.run_training(trace) trace[TrainingTraceKeys.EXIT_STATUS] = 0 except Exception as ex: # pylint: disable=broad-except trace[TrainingTraceKeys.EXIT_STATUS] = 1 fallback('Running the training failed', ex) except SystemExit as ex: 
trace[TrainingTraceKeys.EXIT_STATUS] = ex.code finally: trace[TrainingTraceKeys.EPOCHS_DONE] = main_loop.epochs_done trace[TrainingTraceKeys.TRAIN_END] = datetime.now()
python
def run(config: dict, output_root: str, restore_from: str=None, eval: Optional[str]=None) -> None: """ Run **cxflow** training configured by the passed `config`. Unique ``output_dir`` for this training is created under the given ``output_root`` dir wherein all the training outputs are saved. The output dir name will be roughly ``[model.name]_[time]``. The training procedure consists of the following steps: 1. Set up (create output dir and file logger, dump the loaded config into the output dir) 2. Create dataset (YAML string with ``dataset`` and ``log_dir`` configs are passed to the dataset constructor) 3. Create (or restore) model (dataset, ``log_dir`` and model config is passed to the constructor) 4. Create all the training hooks 5. Create the ``MainLoop`` object 6. Run the main loop If any of the steps fails, the training is terminated. After the training procedure finishes, the output dir will contain the following: - ``train_log.txt`` with entry point and main loop logs (same as the stderr) - dumped YAML config Additional outputs created by hooks, dataset or tensorflow may include: - ``dataset_log.txt`` with info about dataset/stream creation - model checkpoint(s) - TensorBoard log file - TensorFlow event log :param config: configuration :param output_root: dir under which output_dir shall be created :param restore_from: from whence the model should be restored (backend-specific information) :param eval: optional name of the stream to be evaluated """ output_dir = dataset = model = hooks = main_loop = None try: output_dir = create_output_dir(config=config, output_root=output_root) except Exception as ex: # pylint: disable=broad-except fallback('Failed to create output dir', ex) try: dataset = create_dataset(config=config, output_dir=output_dir) except Exception as ex: # pylint: disable=broad-except fallback('Creating dataset failed', ex) try: model = create_model(config=config, output_dir=output_dir, dataset=dataset, restore_from=restore_from) except Exception as ex: # pylint: disable=broad-except fallback('Creating model failed', ex) try: # save the config to file # modify the config so that it contains fallback information config['model']['restore_fallback'] = model.restore_fallback yaml_to_file(data=config, output_dir=output_dir, name=CXF_CONFIG_FILE) except Exception as ex: # pylint: disable=broad-except fallback('Saving config failed', ex) try: hooks = create_hooks(config=config, model=model, dataset=dataset, output_dir=output_dir) except Exception as ex: # pylint: disable=broad-except fallback('Creating hooks failed', ex) try: logging.info('Creating main loop') kwargs = config['main_loop'] if 'main_loop' in config else {} if eval is not None: kwargs['extra_streams'] = [] main_loop = MainLoop(model=model, dataset=dataset, hooks=hooks, **kwargs) except Exception as ex: # pylint: disable=broad-except fallback('Creating main loop failed', ex) if eval is not None: try: with main_loop: logging.info('Running the evaluation of stream `%s`', eval) main_loop.run_evaluation(eval) except Exception as ex: # pylint: disable=broad-except fallback('Running the evaluation failed', ex) else: trace = TrainingTrace(output_dir) try: with main_loop: logging.info('Running the training') trace[TrainingTraceKeys.TRAIN_BEGIN] = datetime.now() main_loop.run_training(trace) trace[TrainingTraceKeys.EXIT_STATUS] = 0 except Exception as ex: # pylint: disable=broad-except trace[TrainingTraceKeys.EXIT_STATUS] = 1 fallback('Running the training failed', ex) except SystemExit as ex: 
trace[TrainingTraceKeys.EXIT_STATUS] = ex.code finally: trace[TrainingTraceKeys.EPOCHS_DONE] = main_loop.epochs_done trace[TrainingTraceKeys.TRAIN_END] = datetime.now()
['def', 'run', '(', 'config', ':', 'dict', ',', 'output_root', ':', 'str', ',', 'restore_from', ':', 'str', '=', 'None', ',', 'eval', ':', 'Optional', '[', 'str', ']', '=', 'None', ')', '->', 'None', ':', 'output_dir', '=', 'dataset', '=', 'model', '=', 'hooks', '=', 'main_loop', '=', 'None', 'try', ':', 'output_dir', '=', 'create_output_dir', '(', 'config', '=', 'config', ',', 'output_root', '=', 'output_root', ')', 'except', 'Exception', 'as', 'ex', ':', '# pylint: disable=broad-except', 'fallback', '(', "'Failed to create output dir'", ',', 'ex', ')', 'try', ':', 'dataset', '=', 'create_dataset', '(', 'config', '=', 'config', ',', 'output_dir', '=', 'output_dir', ')', 'except', 'Exception', 'as', 'ex', ':', '# pylint: disable=broad-except', 'fallback', '(', "'Creating dataset failed'", ',', 'ex', ')', 'try', ':', 'model', '=', 'create_model', '(', 'config', '=', 'config', ',', 'output_dir', '=', 'output_dir', ',', 'dataset', '=', 'dataset', ',', 'restore_from', '=', 'restore_from', ')', 'except', 'Exception', 'as', 'ex', ':', '# pylint: disable=broad-except', 'fallback', '(', "'Creating model failed'", ',', 'ex', ')', 'try', ':', '# save the config to file', '# modify the config so that it contains fallback information', 'config', '[', "'model'", ']', '[', "'restore_fallback'", ']', '=', 'model', '.', 'restore_fallback', 'yaml_to_file', '(', 'data', '=', 'config', ',', 'output_dir', '=', 'output_dir', ',', 'name', '=', 'CXF_CONFIG_FILE', ')', 'except', 'Exception', 'as', 'ex', ':', '# pylint: disable=broad-except', 'fallback', '(', "'Saving config failed'", ',', 'ex', ')', 'try', ':', 'hooks', '=', 'create_hooks', '(', 'config', '=', 'config', ',', 'model', '=', 'model', ',', 'dataset', '=', 'dataset', ',', 'output_dir', '=', 'output_dir', ')', 'except', 'Exception', 'as', 'ex', ':', '# pylint: disable=broad-except', 'fallback', '(', "'Creating hooks failed'", ',', 'ex', ')', 'try', ':', 'logging', '.', 'info', '(', "'Creating main loop'", ')', 'kwargs', '=', 'config', '[', "'main_loop'", ']', 'if', "'main_loop'", 'in', 'config', 'else', '{', '}', 'if', 'eval', 'is', 'not', 'None', ':', 'kwargs', '[', "'extra_streams'", ']', '=', '[', ']', 'main_loop', '=', 'MainLoop', '(', 'model', '=', 'model', ',', 'dataset', '=', 'dataset', ',', 'hooks', '=', 'hooks', ',', '*', '*', 'kwargs', ')', 'except', 'Exception', 'as', 'ex', ':', '# pylint: disable=broad-except', 'fallback', '(', "'Creating main loop failed'", ',', 'ex', ')', 'if', 'eval', 'is', 'not', 'None', ':', 'try', ':', 'with', 'main_loop', ':', 'logging', '.', 'info', '(', "'Running the evaluation of stream `%s`'", ',', 'eval', ')', 'main_loop', '.', 'run_evaluation', '(', 'eval', ')', 'except', 'Exception', 'as', 'ex', ':', '# pylint: disable=broad-except', 'fallback', '(', "'Running the evaluation failed'", ',', 'ex', ')', 'else', ':', 'trace', '=', 'TrainingTrace', '(', 'output_dir', ')', 'try', ':', 'with', 'main_loop', ':', 'logging', '.', 'info', '(', "'Running the training'", ')', 'trace', '[', 'TrainingTraceKeys', '.', 'TRAIN_BEGIN', ']', '=', 'datetime', '.', 'now', '(', ')', 'main_loop', '.', 'run_training', '(', 'trace', ')', 'trace', '[', 'TrainingTraceKeys', '.', 'EXIT_STATUS', ']', '=', '0', 'except', 'Exception', 'as', 'ex', ':', '# pylint: disable=broad-except', 'trace', '[', 'TrainingTraceKeys', '.', 'EXIT_STATUS', ']', '=', '1', 'fallback', '(', "'Running the training failed'", ',', 'ex', ')', 'except', 'SystemExit', 'as', 'ex', ':', 'trace', '[', 'TrainingTraceKeys', '.', 'EXIT_STATUS', ']', '=', 'ex', '.', 'code', 
'finally', ':', 'trace', '[', 'TrainingTraceKeys', '.', 'EPOCHS_DONE', ']', '=', 'main_loop', '.', 'epochs_done', 'trace', '[', 'TrainingTraceKeys', '.', 'TRAIN_END', ']', '=', 'datetime', '.', 'now', '(', ')']
Run **cxflow** training configured by the passed `config`. Unique ``output_dir`` for this training is created under the given ``output_root`` dir wherein all the training outputs are saved. The output dir name will be roughly ``[model.name]_[time]``. The training procedure consists of the following steps: 1. Set up (create output dir and file logger, dump the loaded config into the output dir) 2. Create dataset (YAML string with ``dataset`` and ``log_dir`` configs are passed to the dataset constructor) 3. Create (or restore) model (dataset, ``log_dir`` and model config is passed to the constructor) 4. Create all the training hooks 5. Create the ``MainLoop`` object 6. Run the main loop If any of the steps fails, the training is terminated. After the training procedure finishes, the output dir will contain the following: - ``train_log.txt`` with entry point and main loop logs (same as the stderr) - dumped YAML config Additional outputs created by hooks, dataset or tensorflow may include: - ``dataset_log.txt`` with info about dataset/stream creation - model checkpoint(s) - TensorBoard log file - TensorFlow event log :param config: configuration :param output_root: dir under which output_dir shall be created :param restore_from: from whence the model should be restored (backend-specific information) :param eval: optional name of the stream to be evaluated
['Run', '**', 'cxflow', '**', 'training', 'configured', 'by', 'the', 'passed', 'config', '.']
train
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/common.py#L215-L309
8,811
PeerAssets/pypeerassets
pypeerassets/pautils.py
load_p2th_privkey_into_local_node
def load_p2th_privkey_into_local_node(provider: RpcNode, prod: bool=True) -> None: '''Load PeerAssets P2TH privkey into the local node.''' assert isinstance(provider, RpcNode), {"error": "Import only works with local node."} error = {"error": "Loading P2TH privkey failed."} pa_params = param_query(provider.network) if prod: provider.importprivkey(pa_params.P2TH_wif, "PAPROD") # now verify if ismine == True if not provider.validateaddress(pa_params.P2TH_addr)['ismine']: raise P2THImportFailed(error) else: provider.importprivkey(pa_params.test_P2TH_wif, "PATEST") if not provider.validateaddress(pa_params.test_P2TH_addr)['ismine']: raise P2THImportFailed(error)
python
def load_p2th_privkey_into_local_node(provider: RpcNode, prod: bool=True) -> None: '''Load PeerAssets P2TH privkey into the local node.''' assert isinstance(provider, RpcNode), {"error": "Import only works with local node."} error = {"error": "Loading P2TH privkey failed."} pa_params = param_query(provider.network) if prod: provider.importprivkey(pa_params.P2TH_wif, "PAPROD") # now verify if ismine == True if not provider.validateaddress(pa_params.P2TH_addr)['ismine']: raise P2THImportFailed(error) else: provider.importprivkey(pa_params.test_P2TH_wif, "PATEST") if not provider.validateaddress(pa_params.test_P2TH_addr)['ismine']: raise P2THImportFailed(error)
['def', 'load_p2th_privkey_into_local_node', '(', 'provider', ':', 'RpcNode', ',', 'prod', ':', 'bool', '=', 'True', ')', '->', 'None', ':', 'assert', 'isinstance', '(', 'provider', ',', 'RpcNode', ')', ',', '{', '"error"', ':', '"Import only works with local node."', '}', 'error', '=', '{', '"error"', ':', '"Loading P2TH privkey failed."', '}', 'pa_params', '=', 'param_query', '(', 'provider', '.', 'network', ')', 'if', 'prod', ':', 'provider', '.', 'importprivkey', '(', 'pa_params', '.', 'P2TH_wif', ',', '"PAPROD"', ')', '# now verify if ismine == True', 'if', 'not', 'provider', '.', 'validateaddress', '(', 'pa_params', '.', 'P2TH_addr', ')', '[', "'ismine'", ']', ':', 'raise', 'P2THImportFailed', '(', 'error', ')', 'else', ':', 'provider', '.', 'importprivkey', '(', 'pa_params', '.', 'test_P2TH_wif', ',', '"PATEST"', ')', 'if', 'not', 'provider', '.', 'validateaddress', '(', 'pa_params', '.', 'test_P2TH_addr', ')', '[', "'ismine'", ']', ':', 'raise', 'P2THImportFailed', '(', 'error', ')']
Load PeerAssets P2TH privkey into the local node.
['Load', 'PeerAssets', 'P2TH', 'privkey', 'into', 'the', 'local', 'node', '.']
train
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/pautils.py#L30-L45
8,812
johntruckenbrodt/spatialist
spatialist/raster.py
Raster.res
def res(self): """ the raster resolution in x and y direction Returns ------- tuple (xres, yres) """ return (abs(float(self.geo['xres'])), abs(float(self.geo['yres'])))
python
def res(self): """ the raster resolution in x and y direction Returns ------- tuple (xres, yres) """ return (abs(float(self.geo['xres'])), abs(float(self.geo['yres'])))
['def', 'res', '(', 'self', ')', ':', 'return', '(', 'abs', '(', 'float', '(', 'self', '.', 'geo', '[', "'xres'", ']', ')', ')', ',', 'abs', '(', 'float', '(', 'self', '.', 'geo', '[', "'yres'", ']', ')', ')', ')']
the raster resolution in x and y direction Returns ------- tuple (xres, yres)
['the', 'raster', 'resolution', 'in', 'x', 'and', 'y', 'direction']
train
https://github.com/johntruckenbrodt/spatialist/blob/007f49296a156de8d7168ad235b5a5b8e8d3633d/spatialist/raster.py#L721-L730
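The resolution pair the property above returns ultimately comes from a GDAL-style geotransform, where the y pixel size is usually negative for north-up rasters; taking the absolute value is the point of the `abs()` calls. The numbers below are illustrative.

```python
# GDAL geotransform: (x_origin, xres, 0, y_origin, 0, yres); yres is negative
# for north-up rasters, hence the abs().
geotransform = (500000.0, 20.0, 0.0, 4100000.0, 0.0, -20.0)

xres, yres = abs(float(geotransform[1])), abs(float(geotransform[5]))
print((xres, yres))  # (20.0, 20.0)
```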
8,813
farshidce/touchworks-python
touchworks/api/http.py
TouchWorks._http_request
def _http_request(self, api, data, headers=None): """ internal method for handling request and response and raising an exception is http return status code is not success :rtype : response object from requests.post() """ if not headers: headers = {'Content-Type': 'application/json'} if not self._token_valid: self._token = self.get_token(self._app_name, self._username, self._password) response = requests.post(self._base_url + '/' + api, data=json.dumps(data), headers=headers) # raise an exception if the status was not 200 logger.debug(json.dumps(data)) logger.debug(response.text) response.raise_for_status() return response
python
def _http_request(self, api, data, headers=None): """ internal method for handling request and response and raising an exception is http return status code is not success :rtype : response object from requests.post() """ if not headers: headers = {'Content-Type': 'application/json'} if not self._token_valid: self._token = self.get_token(self._app_name, self._username, self._password) response = requests.post(self._base_url + '/' + api, data=json.dumps(data), headers=headers) # raise an exception if the status was not 200 logger.debug(json.dumps(data)) logger.debug(response.text) response.raise_for_status() return response
['def', '_http_request', '(', 'self', ',', 'api', ',', 'data', ',', 'headers', '=', 'None', ')', ':', 'if', 'not', 'headers', ':', 'headers', '=', '{', "'Content-Type'", ':', "'application/json'", '}', 'if', 'not', 'self', '.', '_token_valid', ':', 'self', '.', '_token', '=', 'self', '.', 'get_token', '(', 'self', '.', '_app_name', ',', 'self', '.', '_username', ',', 'self', '.', '_password', ')', 'response', '=', 'requests', '.', 'post', '(', 'self', '.', '_base_url', '+', "'/'", '+', 'api', ',', 'data', '=', 'json', '.', 'dumps', '(', 'data', ')', ',', 'headers', '=', 'headers', ')', '# raise an exception if the status was not 200', 'logger', '.', 'debug', '(', 'json', '.', 'dumps', '(', 'data', ')', ')', 'logger', '.', 'debug', '(', 'response', '.', 'text', ')', 'response', '.', 'raise_for_status', '(', ')', 'return', 'response']
internal method for handling request and response and raising an exception is http return status code is not success :rtype : response object from requests.post()
['internal', 'method', 'for', 'handling', 'request', 'and', 'response', 'and', 'raising', 'an', 'exception', 'is', 'http', 'return', 'status', 'code', 'is', 'not', 'success']
train
https://github.com/farshidce/touchworks-python/blob/ea8f93a0f4273de1317a318e945a571f5038ba62/touchworks/api/http.py#L178-L195
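The request/response handling in the row above is standard `requests` usage: serialize the payload, log it, and let `raise_for_status()` turn non-2xx responses into exceptions. A self-contained sketch; the URL and payload are only for illustration.

```python
import json
import logging

import requests

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def post_json(url, payload):
    headers = {'Content-Type': 'application/json'}
    response = requests.post(url, data=json.dumps(payload), headers=headers)
    logger.debug(json.dumps(payload))
    logger.debug(response.text)
    # Raises requests.HTTPError for any 4xx/5xx status.
    response.raise_for_status()
    return response

post_json('https://httpbin.org/post', {'action': 'GetToken'})
```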
8,814
polyaxon/polyaxon
polyaxon/api/experiments/serializers.py
ExperimentCreateSerializer.validate_config
def validate_config(self, config): """We only validate the config if passed. Also we use the ExperimentSpecification to check if this config was intended as an experiment. """ # config is optional if not config: return config spec = validate_experiment_spec_config(config) if spec.is_experiment: # Resume normal creation return config # Raise an error to tell the user to use experiment creation instead raise ValidationError('Current experiment creation could not be performed.\n' 'The reason is that the specification sent correspond ' 'to a `{}`.\n'.format(spec.kind))
python
def validate_config(self, config): """We only validate the config if passed. Also we use the ExperimentSpecification to check if this config was intended as an experiment. """ # config is optional if not config: return config spec = validate_experiment_spec_config(config) if spec.is_experiment: # Resume normal creation return config # Raise an error to tell the user to use experiment creation instead raise ValidationError('Current experiment creation could not be performed.\n' 'The reason is that the specification sent correspond ' 'to a `{}`.\n'.format(spec.kind))
['def', 'validate_config', '(', 'self', ',', 'config', ')', ':', '# config is optional', 'if', 'not', 'config', ':', 'return', 'config', 'spec', '=', 'validate_experiment_spec_config', '(', 'config', ')', 'if', 'spec', '.', 'is_experiment', ':', '# Resume normal creation', 'return', 'config', '# Raise an error to tell the user to use experiment creation instead', 'raise', 'ValidationError', '(', "'Current experiment creation could not be performed.\\n'", "'The reason is that the specification sent correspond '", "'to a `{}`.\\n'", '.', 'format', '(', 'spec', '.', 'kind', ')', ')']
We only validate the config if passed. Also we use the ExperimentSpecification to check if this config was intended as an experiment.
['We', 'only', 'validate', 'the', 'config', 'if', 'passed', '.']
train
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/api/experiments/serializers.py#L269-L288
8,815
jsfenfen/990-xml-reader
irs_reader/file_utils.py
validate_object_id
def validate_object_id(object_id): """ It's easy to make a mistake entering these, validate the format """ result = re.match(OBJECT_ID_RE, str(object_id)) if not result: print("'%s' appears not to be a valid 990 object_id" % object_id) raise RuntimeError(OBJECT_ID_MSG) return object_id
python
def validate_object_id(object_id): """ It's easy to make a mistake entering these, validate the format """ result = re.match(OBJECT_ID_RE, str(object_id)) if not result: print("'%s' appears not to be a valid 990 object_id" % object_id) raise RuntimeError(OBJECT_ID_MSG) return object_id
['def', 'validate_object_id', '(', 'object_id', ')', ':', 'result', '=', 're', '.', 'match', '(', 'OBJECT_ID_RE', ',', 'str', '(', 'object_id', ')', ')', 'if', 'not', 'result', ':', 'print', '(', '"\'%s\' appears not to be a valid 990 object_id"', '%', 'object_id', ')', 'raise', 'RuntimeError', '(', 'OBJECT_ID_MSG', ')', 'return', 'object_id']
It's easy to make a mistake entering these, validate the format
['It', 's', 'easy', 'to', 'make', 'a', 'mistake', 'entering', 'these', 'validate', 'the', 'format']
train
https://github.com/jsfenfen/990-xml-reader/blob/00020529b789081329a31a2e30b5ee729ce7596a/irs_reader/file_utils.py#L42-L48
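A self-contained sketch of the validation pattern above. The regex and message are guesses at the expected object_id shape; the real `OBJECT_ID_RE` and `OBJECT_ID_MSG` are defined elsewhere in the repository.

```python
import re

# Assumed format: an 18-digit IRS e-file object id, e.g. 201642229349300909.
OBJECT_ID_RE = r'^\d{18}$'
OBJECT_ID_MSG = 'Object ids are expected to be 18-digit numbers.'

def validate_object_id(object_id):
    if not re.match(OBJECT_ID_RE, str(object_id)):
        print("'%s' appears not to be a valid 990 object_id" % object_id)
        raise RuntimeError(OBJECT_ID_MSG)
    return object_id

print(validate_object_id(201642229349300909))
```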
8,816
googleapis/google-cloud-python
api_core/google/api_core/gapic_v1/config.py
_retry_from_retry_config
def _retry_from_retry_config(retry_params, retry_codes): """Creates a Retry object given a gapic retry configuration. Args: retry_params (dict): The retry parameter values, for example:: { "initial_retry_delay_millis": 1000, "retry_delay_multiplier": 2.5, "max_retry_delay_millis": 120000, "initial_rpc_timeout_millis": 120000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 120000, "total_timeout_millis": 600000 } retry_codes (sequence[str]): The list of retryable gRPC error code names. Returns: google.api_core.retry.Retry: The default retry object for the method. """ exception_classes = [ _exception_class_for_grpc_status_name(code) for code in retry_codes ] return retry.Retry( retry.if_exception_type(*exception_classes), initial=(retry_params["initial_retry_delay_millis"] / _MILLIS_PER_SECOND), maximum=(retry_params["max_retry_delay_millis"] / _MILLIS_PER_SECOND), multiplier=retry_params["retry_delay_multiplier"], deadline=retry_params["total_timeout_millis"] / _MILLIS_PER_SECOND, )
python
def _retry_from_retry_config(retry_params, retry_codes): """Creates a Retry object given a gapic retry configuration. Args: retry_params (dict): The retry parameter values, for example:: { "initial_retry_delay_millis": 1000, "retry_delay_multiplier": 2.5, "max_retry_delay_millis": 120000, "initial_rpc_timeout_millis": 120000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 120000, "total_timeout_millis": 600000 } retry_codes (sequence[str]): The list of retryable gRPC error code names. Returns: google.api_core.retry.Retry: The default retry object for the method. """ exception_classes = [ _exception_class_for_grpc_status_name(code) for code in retry_codes ] return retry.Retry( retry.if_exception_type(*exception_classes), initial=(retry_params["initial_retry_delay_millis"] / _MILLIS_PER_SECOND), maximum=(retry_params["max_retry_delay_millis"] / _MILLIS_PER_SECOND), multiplier=retry_params["retry_delay_multiplier"], deadline=retry_params["total_timeout_millis"] / _MILLIS_PER_SECOND, )
['def', '_retry_from_retry_config', '(', 'retry_params', ',', 'retry_codes', ')', ':', 'exception_classes', '=', '[', '_exception_class_for_grpc_status_name', '(', 'code', ')', 'for', 'code', 'in', 'retry_codes', ']', 'return', 'retry', '.', 'Retry', '(', 'retry', '.', 'if_exception_type', '(', '*', 'exception_classes', ')', ',', 'initial', '=', '(', 'retry_params', '[', '"initial_retry_delay_millis"', ']', '/', '_MILLIS_PER_SECOND', ')', ',', 'maximum', '=', '(', 'retry_params', '[', '"max_retry_delay_millis"', ']', '/', '_MILLIS_PER_SECOND', ')', ',', 'multiplier', '=', 'retry_params', '[', '"retry_delay_multiplier"', ']', ',', 'deadline', '=', 'retry_params', '[', '"total_timeout_millis"', ']', '/', '_MILLIS_PER_SECOND', ',', ')']
Creates a Retry object given a gapic retry configuration. Args: retry_params (dict): The retry parameter values, for example:: { "initial_retry_delay_millis": 1000, "retry_delay_multiplier": 2.5, "max_retry_delay_millis": 120000, "initial_rpc_timeout_millis": 120000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 120000, "total_timeout_millis": 600000 } retry_codes (sequence[str]): The list of retryable gRPC error code names. Returns: google.api_core.retry.Retry: The default retry object for the method.
['Creates', 'a', 'Retry', 'object', 'given', 'a', 'gapic', 'retry', 'configuration', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/gapic_v1/config.py#L48-L79
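The row above translates a gapic retry config into a `google.api_core.retry.Retry`. The same construction written out with literal values, using the numbers from the docstring example; the choice of retryable exception classes is illustrative.

```python
from google.api_core import exceptions, retry

_MILLIS_PER_SECOND = 1000.0

default_retry = retry.Retry(
    retry.if_exception_type(exceptions.ServiceUnavailable,
                            exceptions.DeadlineExceeded),
    initial=1000 / _MILLIS_PER_SECOND,      # 1 s first backoff
    maximum=120000 / _MILLIS_PER_SECOND,    # cap each backoff at 120 s
    multiplier=2.5,
    deadline=600000 / _MILLIS_PER_SECOND,   # give up after 600 s overall
)
```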
8,817
JukeboxPipeline/jukebox-core
src/jukeboxcore/addons/guerilla/guerillamgmt.py
GuerillaMGMTWin.task_view_user
def task_view_user(self, ): """View the user that is currently selected :returns: None :rtype: None :raises: None """ if not self.cur_task: return i = self.task_user_tablev.currentIndex() item = i.internalPointer() if item: user = item.internal_data() self.view_user(user)
python
def task_view_user(self, ): """View the user that is currently selected :returns: None :rtype: None :raises: None """ if not self.cur_task: return i = self.task_user_tablev.currentIndex() item = i.internalPointer() if item: user = item.internal_data() self.view_user(user)
['def', 'task_view_user', '(', 'self', ',', ')', ':', 'if', 'not', 'self', '.', 'cur_task', ':', 'return', 'i', '=', 'self', '.', 'task_user_tablev', '.', 'currentIndex', '(', ')', 'item', '=', 'i', '.', 'internalPointer', '(', ')', 'if', 'item', ':', 'user', '=', 'item', '.', 'internal_data', '(', ')', 'self', '.', 'view_user', '(', 'user', ')']
View the user that is currently selected :returns: None :rtype: None :raises: None
['View', 'the', 'user', 'that', 'is', 'currently', 'selected']
train
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/addons/guerilla/guerillamgmt.py#L2224-L2237
8,818
DataDog/integrations-core
datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py
ApplyResult.get
def get(self, timeout=None): """ Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get(). """ if not self.wait(timeout): raise TimeoutError("Result not available within %fs" % timeout) if self._success: return self._data raise self._data[0]
python
def get(self, timeout=None): """ Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get(). """ if not self.wait(timeout): raise TimeoutError("Result not available within %fs" % timeout) if self._success: return self._data raise self._data[0]
['def', 'get', '(', 'self', ',', 'timeout', '=', 'None', ')', ':', 'if', 'not', 'self', '.', 'wait', '(', 'timeout', ')', ':', 'raise', 'TimeoutError', '(', '"Result not available within %fs"', '%', 'timeout', ')', 'if', 'self', '.', '_success', ':', 'return', 'self', '.', '_data', 'raise', 'self', '.', '_data', '[', '0', ']']
Returns the result when it arrives. If timeout is not None and the result does not arrive within timeout seconds then TimeoutError is raised. If the remote call raised an exception then that exception will be reraised by get().
['Returns', 'the', 'result', 'when', 'it', 'arrives', '.', 'If', 'timeout', 'is', 'not', 'None', 'and', 'the', 'result', 'does', 'not', 'arrive', 'within', 'timeout', 'seconds', 'then', 'TimeoutError', 'is', 'raised', '.', 'If', 'the', 'remote', 'call', 'raised', 'an', 'exception', 'then', 'that', 'exception', 'will', 'be', 'reraised', 'by', 'get', '()', '.']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_base/datadog_checks/base/checks/libs/thread_pool.py#L349-L360
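This get()/wait() pair mirrors the interface of multiprocessing.pool.ApplyResult. A minimal, self-contained sketch of the same pattern built on threading.Event (not the Datadog class itself) shows how the timeout and re-raise behaviour fit together:

import threading

class MiniResult:
    def __init__(self):
        self._ready = threading.Event()
        self._success = None
        self._data = None

    def _set(self, success, data):
        # Called by the worker thread when the job finishes.
        self._success, self._data = success, data
        self._ready.set()

    def wait(self, timeout=None):
        return self._ready.wait(timeout)

    def get(self, timeout=None):
        if not self.wait(timeout):
            raise TimeoutError("Result not available within %fs" % timeout)
        if self._success:
            return self._data
        raise self._data[0]

result = MiniResult()
threading.Timer(0.1, result._set, args=(True, 42)).start()
print(result.get(timeout=1.0))  # prints 42 once the "worker" has delivered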
8,819
apache/spark
python/pyspark/streaming/dstream.py
DStream.countByValueAndWindow
def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None): """ Return a new DStream in which each RDD contains the count of distinct elements in RDDs in a sliding window over this DStream. @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval @param numPartitions: number of partitions of each RDD in the new DStream. """ keyed = self.map(lambda x: (x, 1)) counted = keyed.reduceByKeyAndWindow(operator.add, operator.sub, windowDuration, slideDuration, numPartitions) return counted.filter(lambda kv: kv[1] > 0)
python
def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None): """ Return a new DStream in which each RDD contains the count of distinct elements in RDDs in a sliding window over this DStream. @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval @param numPartitions: number of partitions of each RDD in the new DStream. """ keyed = self.map(lambda x: (x, 1)) counted = keyed.reduceByKeyAndWindow(operator.add, operator.sub, windowDuration, slideDuration, numPartitions) return counted.filter(lambda kv: kv[1] > 0)
['def', 'countByValueAndWindow', '(', 'self', ',', 'windowDuration', ',', 'slideDuration', ',', 'numPartitions', '=', 'None', ')', ':', 'keyed', '=', 'self', '.', 'map', '(', 'lambda', 'x', ':', '(', 'x', ',', '1', ')', ')', 'counted', '=', 'keyed', '.', 'reduceByKeyAndWindow', '(', 'operator', '.', 'add', ',', 'operator', '.', 'sub', ',', 'windowDuration', ',', 'slideDuration', ',', 'numPartitions', ')', 'return', 'counted', '.', 'filter', '(', 'lambda', 'kv', ':', 'kv', '[', '1', ']', '>', '0', ')']
Return a new DStream in which each RDD contains the count of distinct elements in RDDs in a sliding window over this DStream. @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval @param numPartitions: number of partitions of each RDD in the new DStream.
['Return', 'a', 'new', 'DStream', 'in', 'which', 'each', 'RDD', 'contains', 'the', 'count', 'of', 'distinct', 'elements', 'in', 'RDDs', 'in', 'a', 'sliding', 'window', 'over', 'this', 'DStream', '.']
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L485-L500
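Because the implementation delegates to reduceByKeyAndWindow with an inverse function, the streaming context needs a checkpoint directory. A hedged usage sketch, assuming a local Spark installation and a text stream on localhost:9999:

from pyspark import SparkContext
from pyspark.streaming import StreamingContext

sc = SparkContext("local[2]", "CountByValueAndWindowExample")
ssc = StreamingContext(sc, batchDuration=1)
ssc.checkpoint("/tmp/spark-checkpoint")  # required by the inverse-reduce window

words = ssc.socketTextStream("localhost", 9999).flatMap(lambda line: line.split(" "))
# Count occurrences of each distinct word over a 30s window, sliding every 10s.
counts = words.countByValueAndWindow(windowDuration=30, slideDuration=10)
counts.pprint()

ssc.start()
ssc.awaitTermination()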
8,820
numenta/htmresearch
projects/l2_pooling/topology_experiments.py
plotConvergenceByDistantConnectionChance
def plotConvergenceByDistantConnectionChance(results, featureRange, columnRange, longDistanceConnectionsRange, numTrials): """ Plots the convergence graph: iterations vs number of columns. Each curve shows the convergence for a given number of unique features. """ ######################################################################## # # Accumulate all the results per column in a convergence array. # # Convergence[f, c, t] = how long it took it to converge with f unique # features, c columns and topology t. convergence = numpy.zeros((len(featureRange), len(longDistanceConnectionsRange), len(columnRange))) for r in results: print longDistanceConnectionsRange.index(r["longDistanceConnections"]) print columnRange.index(r["numColumns"]) convergence[featureRange.index(r["numFeatures"]), longDistanceConnectionsRange.index(r["longDistanceConnections"]), columnRange.index(r["numColumns"])] += r["convergencePoint"] convergence /= numTrials # For each column, print convergence as fct of number of unique features for i, c in enumerate(columnRange): for j, r in enumerate(longDistanceConnectionsRange): print c, r, convergence[:, j, i] # Print everything anyway for debugging print "Average convergence array=", convergence ######################################################################## # # Create the plot. x-axis= plt.figure(figsize=(8, 6), dpi=80) plotPath = os.path.join("plots", "convergence_by_random_connection_chance.pdf") # Plot each curve legendList = [] colormap = plt.get_cmap("jet") colorList = [colormap(x) for x in numpy.linspace(0., 1., len(featureRange)*len(longDistanceConnectionsRange))] for i, r in enumerate(longDistanceConnectionsRange): for j, f in enumerate(featureRange): currentColor = i*len(featureRange) + j print columnRange print convergence[j, i, :] legendList.append('Connection_prob = {}, num features = {}'.format(r, f)) plt.plot(columnRange, convergence[j, i, :], color=colorList[currentColor]) # format plt.legend(legendList, loc = "lower left") plt.xlabel("Number of columns") plt.xticks(columnRange) plt.yticks(range(0,int(convergence.max())+1)) plt.ylabel("Average number of touches") plt.title("Number of touches to recognize one object (multiple columns)") # save plt.show() plt.savefig(plotPath) plt.close()
python
def plotConvergenceByDistantConnectionChance(results, featureRange, columnRange, longDistanceConnectionsRange, numTrials): """ Plots the convergence graph: iterations vs number of columns. Each curve shows the convergence for a given number of unique features. """ ######################################################################## # # Accumulate all the results per column in a convergence array. # # Convergence[f, c, t] = how long it took it to converge with f unique # features, c columns and topology t. convergence = numpy.zeros((len(featureRange), len(longDistanceConnectionsRange), len(columnRange))) for r in results: print longDistanceConnectionsRange.index(r["longDistanceConnections"]) print columnRange.index(r["numColumns"]) convergence[featureRange.index(r["numFeatures"]), longDistanceConnectionsRange.index(r["longDistanceConnections"]), columnRange.index(r["numColumns"])] += r["convergencePoint"] convergence /= numTrials # For each column, print convergence as fct of number of unique features for i, c in enumerate(columnRange): for j, r in enumerate(longDistanceConnectionsRange): print c, r, convergence[:, j, i] # Print everything anyway for debugging print "Average convergence array=", convergence ######################################################################## # # Create the plot. x-axis= plt.figure(figsize=(8, 6), dpi=80) plotPath = os.path.join("plots", "convergence_by_random_connection_chance.pdf") # Plot each curve legendList = [] colormap = plt.get_cmap("jet") colorList = [colormap(x) for x in numpy.linspace(0., 1., len(featureRange)*len(longDistanceConnectionsRange))] for i, r in enumerate(longDistanceConnectionsRange): for j, f in enumerate(featureRange): currentColor = i*len(featureRange) + j print columnRange print convergence[j, i, :] legendList.append('Connection_prob = {}, num features = {}'.format(r, f)) plt.plot(columnRange, convergence[j, i, :], color=colorList[currentColor]) # format plt.legend(legendList, loc = "lower left") plt.xlabel("Number of columns") plt.xticks(columnRange) plt.yticks(range(0,int(convergence.max())+1)) plt.ylabel("Average number of touches") plt.title("Number of touches to recognize one object (multiple columns)") # save plt.show() plt.savefig(plotPath) plt.close()
['def', 'plotConvergenceByDistantConnectionChance', '(', 'results', ',', 'featureRange', ',', 'columnRange', ',', 'longDistanceConnectionsRange', ',', 'numTrials', ')', ':', '########################################################################', '#', '# Accumulate all the results per column in a convergence array.', '#', '# Convergence[f, c, t] = how long it took it to converge with f unique', '# features, c columns and topology t.', 'convergence', '=', 'numpy', '.', 'zeros', '(', '(', 'len', '(', 'featureRange', ')', ',', 'len', '(', 'longDistanceConnectionsRange', ')', ',', 'len', '(', 'columnRange', ')', ')', ')', 'for', 'r', 'in', 'results', ':', 'print', 'longDistanceConnectionsRange', '.', 'index', '(', 'r', '[', '"longDistanceConnections"', ']', ')', 'print', 'columnRange', '.', 'index', '(', 'r', '[', '"numColumns"', ']', ')', 'convergence', '[', 'featureRange', '.', 'index', '(', 'r', '[', '"numFeatures"', ']', ')', ',', 'longDistanceConnectionsRange', '.', 'index', '(', 'r', '[', '"longDistanceConnections"', ']', ')', ',', 'columnRange', '.', 'index', '(', 'r', '[', '"numColumns"', ']', ')', ']', '+=', 'r', '[', '"convergencePoint"', ']', 'convergence', '/=', 'numTrials', '# For each column, print convergence as fct of number of unique features', 'for', 'i', ',', 'c', 'in', 'enumerate', '(', 'columnRange', ')', ':', 'for', 'j', ',', 'r', 'in', 'enumerate', '(', 'longDistanceConnectionsRange', ')', ':', 'print', 'c', ',', 'r', ',', 'convergence', '[', ':', ',', 'j', ',', 'i', ']', '# Print everything anyway for debugging', 'print', '"Average convergence array="', ',', 'convergence', '########################################################################', '#', '# Create the plot. x-axis=', 'plt', '.', 'figure', '(', 'figsize', '=', '(', '8', ',', '6', ')', ',', 'dpi', '=', '80', ')', 'plotPath', '=', 'os', '.', 'path', '.', 'join', '(', '"plots"', ',', '"convergence_by_random_connection_chance.pdf"', ')', '# Plot each curve', 'legendList', '=', '[', ']', 'colormap', '=', 'plt', '.', 'get_cmap', '(', '"jet"', ')', 'colorList', '=', '[', 'colormap', '(', 'x', ')', 'for', 'x', 'in', 'numpy', '.', 'linspace', '(', '0.', ',', '1.', ',', 'len', '(', 'featureRange', ')', '*', 'len', '(', 'longDistanceConnectionsRange', ')', ')', ']', 'for', 'i', ',', 'r', 'in', 'enumerate', '(', 'longDistanceConnectionsRange', ')', ':', 'for', 'j', ',', 'f', 'in', 'enumerate', '(', 'featureRange', ')', ':', 'currentColor', '=', 'i', '*', 'len', '(', 'featureRange', ')', '+', 'j', 'print', 'columnRange', 'print', 'convergence', '[', 'j', ',', 'i', ',', ':', ']', 'legendList', '.', 'append', '(', "'Connection_prob = {}, num features = {}'", '.', 'format', '(', 'r', ',', 'f', ')', ')', 'plt', '.', 'plot', '(', 'columnRange', ',', 'convergence', '[', 'j', ',', 'i', ',', ':', ']', ',', 'color', '=', 'colorList', '[', 'currentColor', ']', ')', '# format', 'plt', '.', 'legend', '(', 'legendList', ',', 'loc', '=', '"lower left"', ')', 'plt', '.', 'xlabel', '(', '"Number of columns"', ')', 'plt', '.', 'xticks', '(', 'columnRange', ')', 'plt', '.', 'yticks', '(', 'range', '(', '0', ',', 'int', '(', 'convergence', '.', 'max', '(', ')', ')', '+', '1', ')', ')', 'plt', '.', 'ylabel', '(', '"Average number of touches"', ')', 'plt', '.', 'title', '(', '"Number of touches to recognize one object (multiple columns)"', ')', '# save', 'plt', '.', 'show', '(', ')', 'plt', '.', 'savefig', '(', 'plotPath', ')', 'plt', '.', 'close', '(', ')']
Plots the convergence graph: iterations vs number of columns. Each curve shows the convergence for a given number of unique features.
['Plots', 'the', 'convergence', 'graph', ':', 'iterations', 'vs', 'number', 'of', 'columns', '.', 'Each', 'curve', 'shows', 'the', 'convergence', 'for', 'a', 'given', 'number', 'of', 'unique', 'features', '.']
train
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/l2_pooling/topology_experiments.py#L186-L247
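The plotting function expects results to be a flat list of per-trial dicts keyed by the experiment parameters it indexes on. A sketch of the expected record shape (the numeric values here are invented for illustration):

featureRange = [5, 10]
columnRange = [2, 4, 6]
longDistanceConnectionsRange = [0.0, 0.25]

# Each record is accumulated into convergence[feature, connection-chance, column]
# and then averaged over numTrials.
results = [
    {"numFeatures": 5, "numColumns": 4,
     "longDistanceConnections": 0.25, "convergencePoint": 3.0},
    # ... one entry per (features, columns, connection chance) combination and trial
]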
8,821
CalebBell/ht
ht/boiling_nucleic.py
Zuber
def Zuber(sigma, Hvap, rhol, rhog, K=0.18): r'''Calculates critical heat flux for nucleic boiling of a flat plate or other shape as presented in various sources. K = pi/24 is believed to be the original [1]_ value for K, but 0.149 is now more widely used, a value claimed to be from [2]_ according to [5]_. Cao [4]_ lists a value of 0.18 for K. The Wolverine Tube data book also lists a value of 0.18, and so it is the default. .. math:: q_c = 0.149H_{vap} \rho_g^{0.5}\left[\sigma g (\rho_L-\rho_g)\right]^{0.25} Parameters ---------- sigma : float Surface tension of liquid [N/m] Hvap : float Heat of vaporization of the fluid at P, [J/kg] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the produced gas [kg/m^3] K : float Constant [] Returns ------- q: float Critical heat flux [W/m^2] Notes ----- No further work is required on this correlation. Multiple sources confirm its form. Examples -------- Example from [3]_ >>> Zuber(sigma=8.2E-3, Hvap=272E3, rhol=567, rhog=18.09, K=0.149) 444307.22304342285 >>> Zuber(sigma=8.2E-3, Hvap=272E3, rhol=567, rhog=18.09, K=0.18) 536746.9808578263 References ---------- .. [1] Zuber N. "On the stability of boiling heat transfer". Trans ASME 1958 80:711-20. .. [2] Lienhard, J.H., and Dhir, V.K., 1973, Extended Hydrodynamic Theory of the Peak and Minimum Heat Fluxes, NASA CR-2270. .. [3] Serth, R. W., Process Heat Transfer: Principles, Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014. .. [4] Cao, Eduardo. Heat Transfer in Process Engineering. McGraw Hill Professional, 2009. .. [5] Kreith, Frank, Raj Manglik, and Mark Bohn. Principles of Heat Transfer, 7E.Mason, OH: Cengage Learning, 2010. ''' return K*Hvap*rhog**0.5*(g*sigma*(rhol-rhog))**0.25
python
def Zuber(sigma, Hvap, rhol, rhog, K=0.18): r'''Calculates critical heat flux for nucleic boiling of a flat plate or other shape as presented in various sources. K = pi/24 is believed to be the original [1]_ value for K, but 0.149 is now more widely used, a value claimed to be from [2]_ according to [5]_. Cao [4]_ lists a value of 0.18 for K. The Wolverine Tube data book also lists a value of 0.18, and so it is the default. .. math:: q_c = 0.149H_{vap} \rho_g^{0.5}\left[\sigma g (\rho_L-\rho_g)\right]^{0.25} Parameters ---------- sigma : float Surface tension of liquid [N/m] Hvap : float Heat of vaporization of the fluid at P, [J/kg] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the produced gas [kg/m^3] K : float Constant [] Returns ------- q: float Critical heat flux [W/m^2] Notes ----- No further work is required on this correlation. Multiple sources confirm its form. Examples -------- Example from [3]_ >>> Zuber(sigma=8.2E-3, Hvap=272E3, rhol=567, rhog=18.09, K=0.149) 444307.22304342285 >>> Zuber(sigma=8.2E-3, Hvap=272E3, rhol=567, rhog=18.09, K=0.18) 536746.9808578263 References ---------- .. [1] Zuber N. "On the stability of boiling heat transfer". Trans ASME 1958 80:711-20. .. [2] Lienhard, J.H., and Dhir, V.K., 1973, Extended Hydrodynamic Theory of the Peak and Minimum Heat Fluxes, NASA CR-2270. .. [3] Serth, R. W., Process Heat Transfer: Principles, Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014. .. [4] Cao, Eduardo. Heat Transfer in Process Engineering. McGraw Hill Professional, 2009. .. [5] Kreith, Frank, Raj Manglik, and Mark Bohn. Principles of Heat Transfer, 7E.Mason, OH: Cengage Learning, 2010. ''' return K*Hvap*rhog**0.5*(g*sigma*(rhol-rhog))**0.25
['def', 'Zuber', '(', 'sigma', ',', 'Hvap', ',', 'rhol', ',', 'rhog', ',', 'K', '=', '0.18', ')', ':', 'return', 'K', '*', 'Hvap', '*', 'rhog', '**', '0.5', '*', '(', 'g', '*', 'sigma', '*', '(', 'rhol', '-', 'rhog', ')', ')', '**', '0.25']
r'''Calculates critical heat flux for nucleic boiling of a flat plate or other shape as presented in various sources. K = pi/24 is believed to be the original [1]_ value for K, but 0.149 is now more widely used, a value claimed to be from [2]_ according to [5]_. Cao [4]_ lists a value of 0.18 for K. The Wolverine Tube data book also lists a value of 0.18, and so it is the default. .. math:: q_c = 0.149H_{vap} \rho_g^{0.5}\left[\sigma g (\rho_L-\rho_g)\right]^{0.25} Parameters ---------- sigma : float Surface tension of liquid [N/m] Hvap : float Heat of vaporization of the fluid at P, [J/kg] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the produced gas [kg/m^3] K : float Constant [] Returns ------- q: float Critical heat flux [W/m^2] Notes ----- No further work is required on this correlation. Multiple sources confirm its form. Examples -------- Example from [3]_ >>> Zuber(sigma=8.2E-3, Hvap=272E3, rhol=567, rhog=18.09, K=0.149) 444307.22304342285 >>> Zuber(sigma=8.2E-3, Hvap=272E3, rhol=567, rhog=18.09, K=0.18) 536746.9808578263 References ---------- .. [1] Zuber N. "On the stability of boiling heat transfer". Trans ASME 1958 80:711-20. .. [2] Lienhard, J.H., and Dhir, V.K., 1973, Extended Hydrodynamic Theory of the Peak and Minimum Heat Fluxes, NASA CR-2270. .. [3] Serth, R. W., Process Heat Transfer: Principles, Applications and Rules of Thumb. 2E. Amsterdam: Academic Press, 2014. .. [4] Cao, Eduardo. Heat Transfer in Process Engineering. McGraw Hill Professional, 2009. .. [5] Kreith, Frank, Raj Manglik, and Mark Bohn. Principles of Heat Transfer, 7E.Mason, OH: Cengage Learning, 2010.
['r', 'Calculates', 'critical', 'heat', 'flux', 'for', 'nucleic', 'boiling', 'of', 'a', 'flat', 'plate', 'or', 'other', 'shape', 'as', 'presented', 'in', 'various', 'sources', '.', 'K', '=', 'pi', '/', '24', 'is', 'believed', 'to', 'be', 'the', 'original', '[', '1', ']', '_', 'value', 'for', 'K', 'but', '0', '.', '149', 'is', 'now', 'more', 'widely', 'used', 'a', 'value', 'claimed', 'to', 'be', 'from', '[', '2', ']', '_', 'according', 'to', '[', '5', ']', '_', '.', 'Cao', '[', '4', ']', '_', 'lists', 'a', 'value', 'of', '0', '.', '18', 'for', 'K', '.', 'The', 'Wolverine', 'Tube', 'data', 'book', 'also', 'lists', 'a', 'value', 'of', '0', '.', '18', 'and', 'so', 'it', 'is', 'the', 'default', '.']
train
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/boiling_nucleic.py#L1000-L1056
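The doctest value can be reproduced by hand from the formula, assuming g is the standard gravitational acceleration, 9.80665 m/s^2:

# Reproducing the first docstring example directly from the correlation.
g = 9.80665
sigma, Hvap, rhol, rhog, K = 8.2e-3, 272e3, 567.0, 18.09, 0.149

q_c = K * Hvap * rhog**0.5 * (g * sigma * (rhol - rhog))**0.25
print(q_c)  # ~444307 W/m^2, matching the doctest output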
8,822
kedpter/secret_miner
pjutils.py
execute_by_options
def execute_by_options(args): """execute by argument dictionary Args: args (dict): command line argument dictionary """ if args['subcommand'] == 'sphinx': s = Sphinx(proj_info) if args['quickstart']: s.quickstart() elif args['gen_code_api']: s.gen_code_api() elif args['rst2html']: s.rst2html() pass elif args['subcommand'] == 'offline_dist': pod = PyOfflineDist() if args['freeze_deps']: pod.freeze_deps() elif args['download_deps']: pod.download_deps() elif args['install_deps']: pod.install_deps() elif args['clean_deps']: pod.clean_deps() elif args['mkbinary']: pod.pyinstaller_mkbinary(args['mkbinary']) elif args['clean_binary']: pod.clean_binary() pass
python
def execute_by_options(args): """execute by argument dictionary Args: args (dict): command line argument dictionary """ if args['subcommand'] == 'sphinx': s = Sphinx(proj_info) if args['quickstart']: s.quickstart() elif args['gen_code_api']: s.gen_code_api() elif args['rst2html']: s.rst2html() pass elif args['subcommand'] == 'offline_dist': pod = PyOfflineDist() if args['freeze_deps']: pod.freeze_deps() elif args['download_deps']: pod.download_deps() elif args['install_deps']: pod.install_deps() elif args['clean_deps']: pod.clean_deps() elif args['mkbinary']: pod.pyinstaller_mkbinary(args['mkbinary']) elif args['clean_binary']: pod.clean_binary() pass
['def', 'execute_by_options', '(', 'args', ')', ':', 'if', 'args', '[', "'subcommand'", ']', '==', "'sphinx'", ':', 's', '=', 'Sphinx', '(', 'proj_info', ')', 'if', 'args', '[', "'quickstart'", ']', ':', 's', '.', 'quickstart', '(', ')', 'elif', 'args', '[', "'gen_code_api'", ']', ':', 's', '.', 'gen_code_api', '(', ')', 'elif', 'args', '[', "'rst2html'", ']', ':', 's', '.', 'rst2html', '(', ')', 'pass', 'elif', 'args', '[', "'subcommand'", ']', '==', "'offline_dist'", ':', 'pod', '=', 'PyOfflineDist', '(', ')', 'if', 'args', '[', "'freeze_deps'", ']', ':', 'pod', '.', 'freeze_deps', '(', ')', 'elif', 'args', '[', "'download_deps'", ']', ':', 'pod', '.', 'download_deps', '(', ')', 'elif', 'args', '[', "'install_deps'", ']', ':', 'pod', '.', 'install_deps', '(', ')', 'elif', 'args', '[', "'clean_deps'", ']', ':', 'pod', '.', 'clean_deps', '(', ')', 'elif', 'args', '[', "'mkbinary'", ']', ':', 'pod', '.', 'pyinstaller_mkbinary', '(', 'args', '[', "'mkbinary'", ']', ')', 'elif', 'args', '[', "'clean_binary'", ']', ':', 'pod', '.', 'clean_binary', '(', ')', 'pass']
execute by argument dictionary Args: args (dict): command line argument dictionary
['execute', 'by', 'argument', 'dictionary']
train
https://github.com/kedpter/secret_miner/blob/3b4ebe58e11fb688d7e8928ebaa2871fc43717e4/pjutils.py#L417-L448
8,823
CellProfiler/centrosome
centrosome/filter.py
laplacian_of_gaussian
def laplacian_of_gaussian(image, mask, size, sigma): '''Perform the Laplacian of Gaussian transform on the image image - 2-d image array mask - binary mask of significant pixels size - length of side of square kernel to use sigma - standard deviation of the Gaussian ''' half_size = size//2 i,j = np.mgrid[-half_size:half_size+1, -half_size:half_size+1].astype(float) / float(sigma) distance = (i**2 + j**2)/2 gaussian = np.exp(-distance) # # Normalize the Gaussian # gaussian = gaussian / np.sum(gaussian) log = (distance - 1) * gaussian # # Normalize the kernel to have a sum of zero # log = log - np.mean(log) if mask is None: mask = np.ones(image.shape[:2], bool) masked_image = image.copy() masked_image[~mask] = 0 output = convolve(masked_image, log, mode='constant', cval=0) # # Do the LoG of the inverse of the mask. This finds the magnitude of the # contribution of the masked pixels. We then fudge by multiplying by the # value at the pixel of interest - this effectively sets the value at a # masked pixel to that of the pixel of interest. # # It underestimates the LoG, that's not a terrible thing. # correction = convolve((~ mask).astype(float), log, mode='constant', cval = 1) output += correction * image output[~ mask] = image[~ mask] return output
python
def laplacian_of_gaussian(image, mask, size, sigma): '''Perform the Laplacian of Gaussian transform on the image image - 2-d image array mask - binary mask of significant pixels size - length of side of square kernel to use sigma - standard deviation of the Gaussian ''' half_size = size//2 i,j = np.mgrid[-half_size:half_size+1, -half_size:half_size+1].astype(float) / float(sigma) distance = (i**2 + j**2)/2 gaussian = np.exp(-distance) # # Normalize the Gaussian # gaussian = gaussian / np.sum(gaussian) log = (distance - 1) * gaussian # # Normalize the kernel to have a sum of zero # log = log - np.mean(log) if mask is None: mask = np.ones(image.shape[:2], bool) masked_image = image.copy() masked_image[~mask] = 0 output = convolve(masked_image, log, mode='constant', cval=0) # # Do the LoG of the inverse of the mask. This finds the magnitude of the # contribution of the masked pixels. We then fudge by multiplying by the # value at the pixel of interest - this effectively sets the value at a # masked pixel to that of the pixel of interest. # # It underestimates the LoG, that's not a terrible thing. # correction = convolve((~ mask).astype(float), log, mode='constant', cval = 1) output += correction * image output[~ mask] = image[~ mask] return output
['def', 'laplacian_of_gaussian', '(', 'image', ',', 'mask', ',', 'size', ',', 'sigma', ')', ':', 'half_size', '=', 'size', '//', '2', 'i', ',', 'j', '=', 'np', '.', 'mgrid', '[', '-', 'half_size', ':', 'half_size', '+', '1', ',', '-', 'half_size', ':', 'half_size', '+', '1', ']', '.', 'astype', '(', 'float', ')', '/', 'float', '(', 'sigma', ')', 'distance', '=', '(', 'i', '**', '2', '+', 'j', '**', '2', ')', '/', '2', 'gaussian', '=', 'np', '.', 'exp', '(', '-', 'distance', ')', '#', '# Normalize the Gaussian', '#', 'gaussian', '=', 'gaussian', '/', 'np', '.', 'sum', '(', 'gaussian', ')', 'log', '=', '(', 'distance', '-', '1', ')', '*', 'gaussian', '#', '# Normalize the kernel to have a sum of zero', '#', 'log', '=', 'log', '-', 'np', '.', 'mean', '(', 'log', ')', 'if', 'mask', 'is', 'None', ':', 'mask', '=', 'np', '.', 'ones', '(', 'image', '.', 'shape', '[', ':', '2', ']', ',', 'bool', ')', 'masked_image', '=', 'image', '.', 'copy', '(', ')', 'masked_image', '[', '~', 'mask', ']', '=', '0', 'output', '=', 'convolve', '(', 'masked_image', ',', 'log', ',', 'mode', '=', "'constant'", ',', 'cval', '=', '0', ')', '#', '# Do the LoG of the inverse of the mask. This finds the magnitude of the', '# contribution of the masked pixels. We then fudge by multiplying by the', '# value at the pixel of interest - this effectively sets the value at a', '# masked pixel to that of the pixel of interest.', '#', "# It underestimates the LoG, that's not a terrible thing.", '#', 'correction', '=', 'convolve', '(', '(', '~', 'mask', ')', '.', 'astype', '(', 'float', ')', ',', 'log', ',', 'mode', '=', "'constant'", ',', 'cval', '=', '1', ')', 'output', '+=', 'correction', '*', 'image', 'output', '[', '~', 'mask', ']', '=', 'image', '[', '~', 'mask', ']', 'return', 'output']
Perform the Laplacian of Gaussian transform on the image image - 2-d image array mask - binary mask of significant pixels size - length of side of square kernel to use sigma - standard deviation of the Gaussian
['Perform', 'the', 'Laplacian', 'of', 'Gaussian', 'transform', 'on', 'the', 'image']
train
https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/filter.py#L252-L291
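A small usage sketch, assuming convolve above is scipy.ndimage.convolve (which is what centrosome.filter appears to import) and using a single bright pixel as the test image:

import numpy as np
from centrosome.filter import laplacian_of_gaussian

image = np.zeros((64, 64))
image[32, 32] = 1.0                  # a single bright pixel
mask = np.ones(image.shape, bool)    # treat every pixel as significant

# A kernel side of roughly 4*sigma keeps most of the Gaussian support.
log_response = laplacian_of_gaussian(image, mask, size=9, sigma=2.0)
print(log_response.shape, log_response.min())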
8,824
ramses-tech/ramses
ramses/utils.py
singular_subresource
def singular_subresource(raml_resource, route_name): """ Determine if :raml_resource: is a singular subresource. :param raml_resource: Instance of ramlfications.raml.ResourceNode. :param route_name: Name of the :raml_resource:. """ static_parent = get_static_parent(raml_resource, method='POST') if static_parent is None: return False schema = resource_schema(static_parent) or {} properties = schema.get('properties', {}) if route_name not in properties: return False db_settings = properties[route_name].get('_db_settings', {}) is_obj = db_settings.get('type') == 'relationship' single_obj = not db_settings.get('uselist', True) return is_obj and single_obj
python
def singular_subresource(raml_resource, route_name): """ Determine if :raml_resource: is a singular subresource. :param raml_resource: Instance of ramlfications.raml.ResourceNode. :param route_name: Name of the :raml_resource:. """ static_parent = get_static_parent(raml_resource, method='POST') if static_parent is None: return False schema = resource_schema(static_parent) or {} properties = schema.get('properties', {}) if route_name not in properties: return False db_settings = properties[route_name].get('_db_settings', {}) is_obj = db_settings.get('type') == 'relationship' single_obj = not db_settings.get('uselist', True) return is_obj and single_obj
['def', 'singular_subresource', '(', 'raml_resource', ',', 'route_name', ')', ':', 'static_parent', '=', 'get_static_parent', '(', 'raml_resource', ',', 'method', '=', "'POST'", ')', 'if', 'static_parent', 'is', 'None', ':', 'return', 'False', 'schema', '=', 'resource_schema', '(', 'static_parent', ')', 'or', '{', '}', 'properties', '=', 'schema', '.', 'get', '(', "'properties'", ',', '{', '}', ')', 'if', 'route_name', 'not', 'in', 'properties', ':', 'return', 'False', 'db_settings', '=', 'properties', '[', 'route_name', ']', '.', 'get', '(', "'_db_settings'", ',', '{', '}', ')', 'is_obj', '=', 'db_settings', '.', 'get', '(', "'type'", ')', '==', "'relationship'", 'single_obj', '=', 'not', 'db_settings', '.', 'get', '(', "'uselist'", ',', 'True', ')', 'return', 'is_obj', 'and', 'single_obj']
Determine if :raml_resource: is a singular subresource. :param raml_resource: Instance of ramlfications.raml.ResourceNode. :param route_name: Name of the :raml_resource:.
['Determine', 'if', ':', 'raml_resource', ':', 'is', 'a', 'singular', 'subresource', '.']
train
https://github.com/ramses-tech/ramses/blob/ea2e1e896325b7256cdf5902309e05fd98e0c14c/ramses/utils.py#L234-L251
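The check keys on a _db_settings entry whose type is 'relationship' with uselist set to False. A hypothetical schema fragment (the property name "profile" is invented) that would make the function return True, with the decisive test repeated inline:

schema = {
    "properties": {
        "profile": {
            "_db_settings": {
                "type": "relationship",   # must be a relationship field
                "uselist": False,         # single object, not a collection
            }
        }
    }
}

db_settings = schema["properties"]["profile"]["_db_settings"]
is_singular = (db_settings.get("type") == "relationship"
               and not db_settings.get("uselist", True))
print(is_singular)  # True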
8,825
johnnoone/aioconsul
aioconsul/client/session_endpoint.py
SessionEndpoint.renew
async def renew(self, session, *, dc=None): """Renews a TTL-based session Parameters: session (ObjectID): Session ID dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: ObjectMeta: where value is session Raises: NotFound: session is absent The response looks like this:: { "LockDelay": datetime.timedelta(0, 15), "Checks": [ "serfHealth" ], "Node": "foobar", "ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "CreateIndex": 1086449 "Behavior": "release", "TTL": datetime.timedelta(0, 15) } .. note:: Consul MAY return a TTL value higher than the one specified during session creation. This indicates the server is under high load and is requesting clients renew less often. """ session_id = extract_attr(session, keys=["ID"]) response = await self._api.put("/v1/session/renew", session_id, params={"dc": dc}) try: result = response.body[0] except IndexError: meta = extract_meta(response.headers) raise NotFound("No session for %r" % session_id, meta=meta) return consul(result, meta=extract_meta(response.headers))
python
async def renew(self, session, *, dc=None): """Renews a TTL-based session Parameters: session (ObjectID): Session ID dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: ObjectMeta: where value is session Raises: NotFound: session is absent The response looks like this:: { "LockDelay": datetime.timedelta(0, 15), "Checks": [ "serfHealth" ], "Node": "foobar", "ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "CreateIndex": 1086449 "Behavior": "release", "TTL": datetime.timedelta(0, 15) } .. note:: Consul MAY return a TTL value higher than the one specified during session creation. This indicates the server is under high load and is requesting clients renew less often. """ session_id = extract_attr(session, keys=["ID"]) response = await self._api.put("/v1/session/renew", session_id, params={"dc": dc}) try: result = response.body[0] except IndexError: meta = extract_meta(response.headers) raise NotFound("No session for %r" % session_id, meta=meta) return consul(result, meta=extract_meta(response.headers))
['async', 'def', 'renew', '(', 'self', ',', 'session', ',', '*', ',', 'dc', '=', 'None', ')', ':', 'session_id', '=', 'extract_attr', '(', 'session', ',', 'keys', '=', '[', '"ID"', ']', ')', 'response', '=', 'await', 'self', '.', '_api', '.', 'put', '(', '"/v1/session/renew"', ',', 'session_id', ',', 'params', '=', '{', '"dc"', ':', 'dc', '}', ')', 'try', ':', 'result', '=', 'response', '.', 'body', '[', '0', ']', 'except', 'IndexError', ':', 'meta', '=', 'extract_meta', '(', 'response', '.', 'headers', ')', 'raise', 'NotFound', '(', '"No session for %r"', '%', 'session_id', ',', 'meta', '=', 'meta', ')', 'return', 'consul', '(', 'result', ',', 'meta', '=', 'extract_meta', '(', 'response', '.', 'headers', ')', ')']
Renews a TTL-based session Parameters: session (ObjectID): Session ID dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: ObjectMeta: where value is session Raises: NotFound: session is absent The response looks like this:: { "LockDelay": datetime.timedelta(0, 15), "Checks": [ "serfHealth" ], "Node": "foobar", "ID": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "CreateIndex": 1086449 "Behavior": "release", "TTL": datetime.timedelta(0, 15) } .. note:: Consul MAY return a TTL value higher than the one specified during session creation. This indicates the server is under high load and is requesting clients renew less often.
['Renews', 'a', 'TTL', '-', 'based', 'session']
train
https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/client/session_endpoint.py#L198-L237
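A hedged async sketch of how a TTL session is kept alive with this endpoint; it assumes the aioconsul Consul client exposes the endpoint as client.session and that session_id refers to an existing TTL-based session:

import asyncio

async def keep_alive(client, session_id, interval=10):
    # Renew periodically so the TTL-based session does not expire; the
    # renewed object carries the session data plus consistency metadata.
    while True:
        renewed = await client.session.renew(session_id)
        print("session renewed:", renewed)
        await asyncio.sleep(interval)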
8,826
pymc-devs/pymc
pymc/StepMethods.py
assign_method
def assign_method(stochastic, scale=None, verbose=-1): """ Returns a step method instance to handle a variable. If several methods have the same competence, it picks one arbitrarily (using set.pop()). """ # Retrieve set of best candidates best_candidates = pick_best_methods(stochastic) # Randomly grab and appropriate method method = best_candidates.pop() failure_header = """Failed attempting to automatically assign step method class %s to stochastic variable %s. Try setting %s's competence method to return 0 and manually assigning it when appropriate. See the user guide. Error message: """ % (method.__name__, stochastic.__name__, method.__name__) try: if scale: out = method(stochastic, scale=scale, verbose=verbose) else: out = method(stochastic, verbose=verbose) except: a, b, c = sys.exc_info() try: args = list(b.args) except AttributeError: args = [] args.append(failure_header) b.args = args six.reraise(a, b, c) return out
python
def assign_method(stochastic, scale=None, verbose=-1): """ Returns a step method instance to handle a variable. If several methods have the same competence, it picks one arbitrarily (using set.pop()). """ # Retrieve set of best candidates best_candidates = pick_best_methods(stochastic) # Randomly grab and appropriate method method = best_candidates.pop() failure_header = """Failed attempting to automatically assign step method class %s to stochastic variable %s. Try setting %s's competence method to return 0 and manually assigning it when appropriate. See the user guide. Error message: """ % (method.__name__, stochastic.__name__, method.__name__) try: if scale: out = method(stochastic, scale=scale, verbose=verbose) else: out = method(stochastic, verbose=verbose) except: a, b, c = sys.exc_info() try: args = list(b.args) except AttributeError: args = [] args.append(failure_header) b.args = args six.reraise(a, b, c) return out
['def', 'assign_method', '(', 'stochastic', ',', 'scale', '=', 'None', ',', 'verbose', '=', '-', '1', ')', ':', '# Retrieve set of best candidates', 'best_candidates', '=', 'pick_best_methods', '(', 'stochastic', ')', '# Randomly grab and appropriate method', 'method', '=', 'best_candidates', '.', 'pop', '(', ')', 'failure_header', '=', '"""Failed attempting to automatically assign step method class %s\nto stochastic variable %s. Try setting %s\'s competence method to return 0\nand manually assigning it when appropriate. See the user guide.\n\nError message: """', '%', '(', 'method', '.', '__name__', ',', 'stochastic', '.', '__name__', ',', 'method', '.', '__name__', ')', 'try', ':', 'if', 'scale', ':', 'out', '=', 'method', '(', 'stochastic', ',', 'scale', '=', 'scale', ',', 'verbose', '=', 'verbose', ')', 'else', ':', 'out', '=', 'method', '(', 'stochastic', ',', 'verbose', '=', 'verbose', ')', 'except', ':', 'a', ',', 'b', ',', 'c', '=', 'sys', '.', 'exc_info', '(', ')', 'try', ':', 'args', '=', 'list', '(', 'b', '.', 'args', ')', 'except', 'AttributeError', ':', 'args', '=', '[', ']', 'args', '.', 'append', '(', 'failure_header', ')', 'b', '.', 'args', '=', 'args', 'six', '.', 'reraise', '(', 'a', ',', 'b', ',', 'c', ')', 'return', 'out']
Returns a step method instance to handle a variable. If several methods have the same competence, it picks one arbitrarily (using set.pop()).
['Returns', 'a', 'step', 'method', 'instance', 'to', 'handle', 'a', 'variable', '.', 'If', 'several', 'methods', 'have', 'the', 'same', 'competence', 'it', 'picks', 'one', 'arbitrarily', '(', 'using', 'set', '.', 'pop', '()', ')', '.']
train
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L97-L130
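A hedged sketch against the classic PyMC 2.x API (where this module lives): competence-based assignment picks a StepMethod subclass for a plain stochastic variable.

import pymc
from pymc.StepMethods import assign_method

x = pymc.Normal('x', mu=0.0, tau=1.0)
step = assign_method(x)
print(type(step).__name__)  # typically 'Metropolis' for a plain continuous stochastic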
8,827
pereorga/csvshuf
csvshuf/csvshuf.py
column_list
def column_list(string): """Validate and convert comma-separated list of column numbers.""" try: columns = list(map(int, string.split(','))) except ValueError as e: raise argparse.ArgumentTypeError(*e.args) for column in columns: if column < 1: raise argparse.ArgumentTypeError( 'Invalid column {!r}: column numbers start at 1.' .format(column)) return columns
python
def column_list(string): """Validate and convert comma-separated list of column numbers.""" try: columns = list(map(int, string.split(','))) except ValueError as e: raise argparse.ArgumentTypeError(*e.args) for column in columns: if column < 1: raise argparse.ArgumentTypeError( 'Invalid column {!r}: column numbers start at 1.' .format(column)) return columns
['def', 'column_list', '(', 'string', ')', ':', 'try', ':', 'columns', '=', 'list', '(', 'map', '(', 'int', ',', 'string', '.', 'split', '(', "','", ')', ')', ')', 'except', 'ValueError', 'as', 'e', ':', 'raise', 'argparse', '.', 'ArgumentTypeError', '(', '*', 'e', '.', 'args', ')', 'for', 'column', 'in', 'columns', ':', 'if', 'column', '<', '1', ':', 'raise', 'argparse', '.', 'ArgumentTypeError', '(', "'Invalid column {!r}: column numbers start at 1.'", '.', 'format', '(', 'column', ')', ')', 'return', 'columns']
Validate and convert comma-separated list of column numbers.
['Validate', 'and', 'convert', 'comma', '-', 'separated', 'list', 'of', 'column', 'numbers', '.']
train
https://github.com/pereorga/csvshuf/blob/70fdd4f512ef980bffe9cc51bfe59fea116d7c2f/csvshuf/csvshuf.py#L27-L38
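The validator is written to be plugged into argparse as a type= converter. A minimal wiring sketch, assuming column_list is importable from csvshuf.csvshuf as in the linked file (the flag name is hypothetical):

import argparse
from csvshuf.csvshuf import column_list

parser = argparse.ArgumentParser()
parser.add_argument('-c', '--columns', type=column_list,
                    help='comma-separated, 1-based column numbers')

args = parser.parse_args(['-c', '2,5,7'])
print(args.columns)  # [2, 5, 7]
# parser.parse_args(['-c', '0,3']) would exit with "column numbers start at 1."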
8,828
COALAIP/pycoalaip
coalaip/model_validators.py
is_manifestation_model
def is_manifestation_model(instance, attribute, value): """Must include a ``manifestationOfWork`` key.""" instance_name = instance.__class__.__name__ is_creation_model(instance, attribute, value) manifestation_of = value.get('manifestationOfWork') if not isinstance(manifestation_of, str): err_str = ("'manifestationOfWork' must be given as a string in the " "'{attr}' parameter of a '{cls}'. Given " "'{value}'").format(attr=attribute.name, cls=instance_name, value=manifestation_of) print(err_str)
python
def is_manifestation_model(instance, attribute, value): """Must include a ``manifestationOfWork`` key.""" instance_name = instance.__class__.__name__ is_creation_model(instance, attribute, value) manifestation_of = value.get('manifestationOfWork') if not isinstance(manifestation_of, str): err_str = ("'manifestationOfWork' must be given as a string in the " "'{attr}' parameter of a '{cls}'. Given " "'{value}'").format(attr=attribute.name, cls=instance_name, value=manifestation_of) print(err_str)
['def', 'is_manifestation_model', '(', 'instance', ',', 'attribute', ',', 'value', ')', ':', 'instance_name', '=', 'instance', '.', '__class__', '.', '__name__', 'is_creation_model', '(', 'instance', ',', 'attribute', ',', 'value', ')', 'manifestation_of', '=', 'value', '.', 'get', '(', "'manifestationOfWork'", ')', 'if', 'not', 'isinstance', '(', 'manifestation_of', ',', 'str', ')', ':', 'err_str', '=', '(', '"\'manifestationOfWork\' must be given as a string in the "', '"\'{attr}\' parameter of a \'{cls}\'. Given "', '"\'{value}\'"', ')', '.', 'format', '(', 'attr', '=', 'attribute', '.', 'name', ',', 'cls', '=', 'instance_name', ',', 'value', '=', 'manifestation_of', ')', 'print', '(', 'err_str', ')']
Must include a ``manifestationOfWork`` key.
['Must', 'include', 'a', 'manifestationOfWork', 'key', '.']
train
https://github.com/COALAIP/pycoalaip/blob/cecc8f6ff4733f0525fafcee63647753e832f0be/coalaip/model_validators.py#L68-L81
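The validator follows the attrs (instance, attribute, value) signature and only prints the error string rather than raising. A self-contained analogue of the core check (names and data invented; this version raises instead of printing):

def check_manifestation(data):
    # A Manifestation's data dict must carry 'manifestationOfWork' as a string.
    manifestation_of = data.get('manifestationOfWork')
    if not isinstance(manifestation_of, str):
        raise ValueError(
            "'manifestationOfWork' must be given as a string, got "
            "{!r}".format(manifestation_of))

check_manifestation({'name': 'Example Edition',
                     'manifestationOfWork': 'https://example.org/work/1'})  # ok
# check_manifestation({'name': 'no link'}) would raise ValueError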
8,829
saltstack/salt
salt/modules/cp.py
push_dir
def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
python
def push_dir(path, glob=None, upload_path=None): ''' Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf' ''' if '../' in path or not os.path.isabs(path): return False tmpupload_path = upload_path path = os.path.realpath(path) if os.path.isfile(path): return push(path, upload_path=upload_path) else: filelist = [] for root, _, files in salt.utils.path.os_walk(path): filelist += [os.path.join(root, tmpfile) for tmpfile in files] if glob is not None: filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)] if not filelist: return False for tmpfile in filelist: if upload_path and tmpfile.startswith(path): tmpupload_path = os.path.join(os.path.sep, upload_path.strip(os.path.sep), tmpfile.replace(path, '') .strip(os.path.sep)) ret = push(tmpfile, upload_path=tmpupload_path) if not ret: return ret return True
['def', 'push_dir', '(', 'path', ',', 'glob', '=', 'None', ',', 'upload_path', '=', 'None', ')', ':', 'if', "'../'", 'in', 'path', 'or', 'not', 'os', '.', 'path', '.', 'isabs', '(', 'path', ')', ':', 'return', 'False', 'tmpupload_path', '=', 'upload_path', 'path', '=', 'os', '.', 'path', '.', 'realpath', '(', 'path', ')', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'path', ')', ':', 'return', 'push', '(', 'path', ',', 'upload_path', '=', 'upload_path', ')', 'else', ':', 'filelist', '=', '[', ']', 'for', 'root', ',', '_', ',', 'files', 'in', 'salt', '.', 'utils', '.', 'path', '.', 'os_walk', '(', 'path', ')', ':', 'filelist', '+=', '[', 'os', '.', 'path', '.', 'join', '(', 'root', ',', 'tmpfile', ')', 'for', 'tmpfile', 'in', 'files', ']', 'if', 'glob', 'is', 'not', 'None', ':', 'filelist', '=', '[', 'fi', 'for', 'fi', 'in', 'filelist', 'if', 'fnmatch', '.', 'fnmatch', '(', 'os', '.', 'path', '.', 'basename', '(', 'fi', ')', ',', 'glob', ')', ']', 'if', 'not', 'filelist', ':', 'return', 'False', 'for', 'tmpfile', 'in', 'filelist', ':', 'if', 'upload_path', 'and', 'tmpfile', '.', 'startswith', '(', 'path', ')', ':', 'tmpupload_path', '=', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'sep', ',', 'upload_path', '.', 'strip', '(', 'os', '.', 'path', '.', 'sep', ')', ',', 'tmpfile', '.', 'replace', '(', 'path', ',', "''", ')', '.', 'strip', '(', 'os', '.', 'path', '.', 'sep', ')', ')', 'ret', '=', 'push', '(', 'tmpfile', ',', 'upload_path', '=', 'tmpupload_path', ')', 'if', 'not', 'ret', ':', 'return', 'ret', 'return', 'True']
Push a directory from the minion up to the master, the files will be saved to the salt master in the master's minion files cachedir (defaults to ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob for matching specific files using globbing. .. versionadded:: 2014.7.0 Since this feature allows a minion to push files up to the master server it is disabled by default for security purposes. To enable, set ``file_recv`` to ``True`` in the master configuration file, and restart the master. upload_path Provide a different path and directory name inside the master's minion files cachedir CLI Example: .. code-block:: bash salt '*' cp.push /usr/lib/mysql salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path' salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf'
['Push', 'a', 'directory', 'from', 'the', 'minion', 'up', 'to', 'the', 'master', 'the', 'files', 'will', 'be', 'saved', 'to', 'the', 'salt', 'master', 'in', 'the', 'master', 's', 'minion', 'files', 'cachedir', '(', 'defaults', 'to', '/', 'var', '/', 'cache', '/', 'salt', '/', 'master', '/', 'minions', '/', 'minion', '-', 'id', '/', 'files', ')', '.', 'It', 'also', 'has', 'a', 'glob', 'for', 'matching', 'specific', 'files', 'using', 'globbing', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cp.py#L883-L931
8,830
saltstack/salt
salt/modules/debian_ip.py
_parse_ethtool_opts
def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config
python
def _parse_ethtool_opts(opts, iface): ''' Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting. ''' config = {} if 'autoneg' in opts: if opts['autoneg'] in _CONFIG_TRUE: config.update({'autoneg': 'on'}) elif opts['autoneg'] in _CONFIG_FALSE: config.update({'autoneg': 'off'}) else: _raise_error_iface(iface, 'autoneg', _CONFIG_TRUE + _CONFIG_FALSE) if 'duplex' in opts: valid = ['full', 'half'] if opts['duplex'] in valid: config.update({'duplex': opts['duplex']}) else: _raise_error_iface(iface, 'duplex', valid) if 'speed' in opts: valid = ['10', '100', '1000', '10000'] if six.text_type(opts['speed']) in valid: config.update({'speed': opts['speed']}) else: _raise_error_iface(iface, opts['speed'], valid) valid = _CONFIG_TRUE + _CONFIG_FALSE for option in ('rx', 'tx', 'sg', 'tso', 'ufo', 'gso', 'gro', 'lro'): if option in opts: if opts[option] in _CONFIG_TRUE: config.update({option: 'on'}) elif opts[option] in _CONFIG_FALSE: config.update({option: 'off'}) else: _raise_error_iface(iface, option, valid) return config
['def', '_parse_ethtool_opts', '(', 'opts', ',', 'iface', ')', ':', 'config', '=', '{', '}', 'if', "'autoneg'", 'in', 'opts', ':', 'if', 'opts', '[', "'autoneg'", ']', 'in', '_CONFIG_TRUE', ':', 'config', '.', 'update', '(', '{', "'autoneg'", ':', "'on'", '}', ')', 'elif', 'opts', '[', "'autoneg'", ']', 'in', '_CONFIG_FALSE', ':', 'config', '.', 'update', '(', '{', "'autoneg'", ':', "'off'", '}', ')', 'else', ':', '_raise_error_iface', '(', 'iface', ',', "'autoneg'", ',', '_CONFIG_TRUE', '+', '_CONFIG_FALSE', ')', 'if', "'duplex'", 'in', 'opts', ':', 'valid', '=', '[', "'full'", ',', "'half'", ']', 'if', 'opts', '[', "'duplex'", ']', 'in', 'valid', ':', 'config', '.', 'update', '(', '{', "'duplex'", ':', 'opts', '[', "'duplex'", ']', '}', ')', 'else', ':', '_raise_error_iface', '(', 'iface', ',', "'duplex'", ',', 'valid', ')', 'if', "'speed'", 'in', 'opts', ':', 'valid', '=', '[', "'10'", ',', "'100'", ',', "'1000'", ',', "'10000'", ']', 'if', 'six', '.', 'text_type', '(', 'opts', '[', "'speed'", ']', ')', 'in', 'valid', ':', 'config', '.', 'update', '(', '{', "'speed'", ':', 'opts', '[', "'speed'", ']', '}', ')', 'else', ':', '_raise_error_iface', '(', 'iface', ',', 'opts', '[', "'speed'", ']', ',', 'valid', ')', 'valid', '=', '_CONFIG_TRUE', '+', '_CONFIG_FALSE', 'for', 'option', 'in', '(', "'rx'", ',', "'tx'", ',', "'sg'", ',', "'tso'", ',', "'ufo'", ',', "'gso'", ',', "'gro'", ',', "'lro'", ')', ':', 'if', 'option', 'in', 'opts', ':', 'if', 'opts', '[', 'option', ']', 'in', '_CONFIG_TRUE', ':', 'config', '.', 'update', '(', '{', 'option', ':', "'on'", '}', ')', 'elif', 'opts', '[', 'option', ']', 'in', '_CONFIG_FALSE', ':', 'config', '.', 'update', '(', '{', 'option', ':', "'off'", '}', ')', 'else', ':', '_raise_error_iface', '(', 'iface', ',', 'option', ',', 'valid', ')', 'return', 'config']
Filters given options and outputs valid settings for ETHTOOLS_OPTS If an option has a value that is not expected, this function will log what the Interface, Setting and what it was expecting.
['Filters', 'given', 'options', 'and', 'outputs', 'valid', 'settings', 'for', 'ETHTOOLS_OPTS', 'If', 'an', 'option', 'has', 'a', 'value', 'that', 'is', 'not', 'expected', 'this', 'function', 'will', 'log', 'what', 'the', 'Interface', 'Setting', 'and', 'what', 'it', 'was', 'expecting', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/debian_ip.py#L695-L736
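An illustrative input/output mapping only (this is a private Salt helper, so the dict below is not an API): assuming the module's _CONFIG_TRUE and _CONFIG_FALSE lists accept Python booleans, an interface opts dict is filtered into ETHTOOL_OPTS-style settings roughly as follows.

# Example interface options as they might appear in pillar/config data.
opts = {'autoneg': True, 'duplex': 'full', 'speed': 1000, 'rx': True, 'tso': False}

# What _parse_ethtool_opts would produce for the dict above: booleans are
# normalised to 'on'/'off', duplex and speed are validated and passed through.
expected = {'autoneg': 'on', 'duplex': 'full', 'speed': 1000, 'rx': 'on', 'tso': 'off'}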
8,831
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_fabric_service_rpc/show_linkinfo/output/show_link_info/linkinfo_isl/__init__.py
linkinfo_isl._set_linkinfo_isllink_srcport_type
def _set_linkinfo_isllink_srcport_type(self, v, load=False): """ Setter method for linkinfo_isllink_srcport_type, mapped from YANG variable /brocade_fabric_service_rpc/show_linkinfo/output/show_link_info/linkinfo_isl/linkinfo_isllink_srcport_type (interfacetype-type) If this variable is read-only (config: false) in the source YANG file, then _set_linkinfo_isllink_srcport_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_linkinfo_isllink_srcport_type() directly. YANG Description: Source port/interface type. It can take the following values: Te - for 10G Ethernet ports. Fi - for Fibre Channel ports. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'Te|Fi', 'length': [u'2']}), is_leaf=True, yang_name="linkinfo-isllink-srcport-type", rest_name="linkinfo-isllink-srcport-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Source port/interface type'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='interfacetype-type', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """linkinfo_isllink_srcport_type must be of a type compatible with interfacetype-type""", 'defined-type': "brocade-fabric-service:interfacetype-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'Te|Fi', 'length': [u'2']}), is_leaf=True, yang_name="linkinfo-isllink-srcport-type", rest_name="linkinfo-isllink-srcport-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Source port/interface type'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='interfacetype-type', is_config=True)""", }) self.__linkinfo_isllink_srcport_type = t if hasattr(self, '_set'): self._set()
python
def _set_linkinfo_isllink_srcport_type(self, v, load=False): """ Setter method for linkinfo_isllink_srcport_type, mapped from YANG variable /brocade_fabric_service_rpc/show_linkinfo/output/show_link_info/linkinfo_isl/linkinfo_isllink_srcport_type (interfacetype-type) If this variable is read-only (config: false) in the source YANG file, then _set_linkinfo_isllink_srcport_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_linkinfo_isllink_srcport_type() directly. YANG Description: Source port/interface type. It can take the following values: Te - for 10G Ethernet ports. Fi - for Fibre Channel ports. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'Te|Fi', 'length': [u'2']}), is_leaf=True, yang_name="linkinfo-isllink-srcport-type", rest_name="linkinfo-isllink-srcport-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Source port/interface type'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='interfacetype-type', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """linkinfo_isllink_srcport_type must be of a type compatible with interfacetype-type""", 'defined-type': "brocade-fabric-service:interfacetype-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'Te|Fi', 'length': [u'2']}), is_leaf=True, yang_name="linkinfo-isllink-srcport-type", rest_name="linkinfo-isllink-srcport-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'Source port/interface type'}}, namespace='urn:brocade.com:mgmt:brocade-fabric-service', defining_module='brocade-fabric-service', yang_type='interfacetype-type', is_config=True)""", }) self.__linkinfo_isllink_srcport_type = t if hasattr(self, '_set'): self._set()
['def', '_set_linkinfo_isllink_srcport_type', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'RestrictedClassType', '(', 'base_type', '=', 'unicode', ',', 'restriction_dict', '=', '{', "'pattern'", ':', "u'Te|Fi'", ',', "'length'", ':', '[', "u'2'", ']', '}', ')', ',', 'is_leaf', '=', 'True', ',', 'yang_name', '=', '"linkinfo-isllink-srcport-type"', ',', 'rest_name', '=', '"linkinfo-isllink-srcport-type"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'False', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Source port/interface type'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-fabric-service'", ',', 'defining_module', '=', "'brocade-fabric-service'", ',', 'yang_type', '=', "'interfacetype-type'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""linkinfo_isllink_srcport_type must be of a type compatible with interfacetype-type"""', ',', "'defined-type'", ':', '"brocade-fabric-service:interfacetype-type"', ',', "'generated-type'", ':', '"""YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={\'pattern\': u\'Te|Fi\', \'length\': [u\'2\']}), is_leaf=True, yang_name="linkinfo-isllink-srcport-type", rest_name="linkinfo-isllink-srcport-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u\'tailf-common\': {u\'info\': u\'Source port/interface type\'}}, namespace=\'urn:brocade.com:mgmt:brocade-fabric-service\', defining_module=\'brocade-fabric-service\', yang_type=\'interfacetype-type\', is_config=True)"""', ',', '}', ')', 'self', '.', '__linkinfo_isllink_srcport_type', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for linkinfo_isllink_srcport_type, mapped from YANG variable /brocade_fabric_service_rpc/show_linkinfo/output/show_link_info/linkinfo_isl/linkinfo_isllink_srcport_type (interfacetype-type) If this variable is read-only (config: false) in the source YANG file, then _set_linkinfo_isllink_srcport_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_linkinfo_isllink_srcport_type() directly. YANG Description: Source port/interface type. It can take the following values: Te - for 10G Ethernet ports. Fi - for Fibre Channel ports.
['Setter', 'method', 'for', 'linkinfo_isllink_srcport_type', 'mapped', 'from', 'YANG', 'variable', '/', 'brocade_fabric_service_rpc', '/', 'show_linkinfo', '/', 'output', '/', 'show_link_info', '/', 'linkinfo_isl', '/', 'linkinfo_isllink_srcport_type', '(', 'interfacetype', '-', 'type', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_linkinfo_isllink_srcport_type', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_linkinfo_isllink_srcport_type', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_fabric_service_rpc/show_linkinfo/output/show_link_info/linkinfo_isl/__init__.py#L225-L251
8,832
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_nameserver_rpc/get_nameserver_detail/output/show_nameserver/__init__.py
show_nameserver._set_nameserver_cos
def _set_nameserver_cos(self, v, load=False): """ Setter method for nameserver_cos, mapped from YANG variable /brocade_nameserver_rpc/get_nameserver_detail/output/show_nameserver/nameserver_cos (nameserver-cos-type) If this variable is read-only (config: false) in the source YANG file, then _set_nameserver_cos is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_nameserver_cos() directly. YANG Description: Indicates the Fibre Channel Class of Service supported by the device. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'F|1|2|3|,', 'length': [u'0..8']}), is_leaf=True, yang_name="nameserver-cos", rest_name="nameserver-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'class of service'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='nameserver-cos-type', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """nameserver_cos must be of a type compatible with nameserver-cos-type""", 'defined-type': "brocade-nameserver:nameserver-cos-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'F|1|2|3|,', 'length': [u'0..8']}), is_leaf=True, yang_name="nameserver-cos", rest_name="nameserver-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'class of service'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='nameserver-cos-type', is_config=True)""", }) self.__nameserver_cos = t if hasattr(self, '_set'): self._set()
python
def _set_nameserver_cos(self, v, load=False): """ Setter method for nameserver_cos, mapped from YANG variable /brocade_nameserver_rpc/get_nameserver_detail/output/show_nameserver/nameserver_cos (nameserver-cos-type) If this variable is read-only (config: false) in the source YANG file, then _set_nameserver_cos is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_nameserver_cos() directly. YANG Description: Indicates the Fibre Channel Class of Service supported by the device. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'F|1|2|3|,', 'length': [u'0..8']}), is_leaf=True, yang_name="nameserver-cos", rest_name="nameserver-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'class of service'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='nameserver-cos-type', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """nameserver_cos must be of a type compatible with nameserver-cos-type""", 'defined-type': "brocade-nameserver:nameserver-cos-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'F|1|2|3|,', 'length': [u'0..8']}), is_leaf=True, yang_name="nameserver-cos", rest_name="nameserver-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'info': u'class of service'}}, namespace='urn:brocade.com:mgmt:brocade-nameserver', defining_module='brocade-nameserver', yang_type='nameserver-cos-type', is_config=True)""", }) self.__nameserver_cos = t if hasattr(self, '_set'): self._set()
['def', '_set_nameserver_cos', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'RestrictedClassType', '(', 'base_type', '=', 'unicode', ',', 'restriction_dict', '=', '{', "'pattern'", ':', "u'F|1|2|3|,'", ',', "'length'", ':', '[', "u'0..8'", ']', '}', ')', ',', 'is_leaf', '=', 'True', ',', 'yang_name', '=', '"nameserver-cos"', ',', 'rest_name', '=', '"nameserver-cos"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'False', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'class of service'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-nameserver'", ',', 'defining_module', '=', "'brocade-nameserver'", ',', 'yang_type', '=', "'nameserver-cos-type'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""nameserver_cos must be of a type compatible with nameserver-cos-type"""', ',', "'defined-type'", ':', '"brocade-nameserver:nameserver-cos-type"', ',', "'generated-type'", ':', '"""YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={\'pattern\': u\'F|1|2|3|,\', \'length\': [u\'0..8\']}), is_leaf=True, yang_name="nameserver-cos", rest_name="nameserver-cos", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u\'tailf-common\': {u\'info\': u\'class of service\'}}, namespace=\'urn:brocade.com:mgmt:brocade-nameserver\', defining_module=\'brocade-nameserver\', yang_type=\'nameserver-cos-type\', is_config=True)"""', ',', '}', ')', 'self', '.', '__nameserver_cos', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for nameserver_cos, mapped from YANG variable /brocade_nameserver_rpc/get_nameserver_detail/output/show_nameserver/nameserver_cos (nameserver-cos-type) If this variable is read-only (config: false) in the source YANG file, then _set_nameserver_cos is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_nameserver_cos() directly. YANG Description: Indicates the Fibre Channel Class of Service supported by the device.
['Setter', 'method', 'for', 'nameserver_cos', 'mapped', 'from', 'YANG', 'variable', '/', 'brocade_nameserver_rpc', '/', 'get_nameserver_detail', '/', 'output', '/', 'show_nameserver', '/', 'nameserver_cos', '(', 'nameserver', '-', 'cos', '-', 'type', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_nameserver_cos', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_nameserver_cos', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_nameserver_rpc/get_nameserver_detail/output/show_nameserver/__init__.py#L586-L610
8,833
codeinn/vcs
vcs/cli.py
BaseCommand.execute
def execute(self, *args, **options): """ Executes whole process of parsing arguments, running command and trying to catch errors. """ try: self.handle(*args, **options) except CommandError, e: if options['debug']: try: import ipdb ipdb.set_trace() except ImportError: import pdb pdb.set_trace() sys.stderr.write(colorize('ERROR: ', fg='red')) self.stderr.write('%s\n' % e) sys.exit(1) except Exception, e: if isinstance(e, IOError) and getattr(e, 'errno') == errno.EPIPE: sys.exit(0) if options['debug']: try: import ipdb ipdb.set_trace() except ImportError: import pdb pdb.set_trace() if options.get('traceback'): import traceback self.stderr.write(u'\n'.join(( '=========', 'TRACEBACK', '=========', '', '', ))) traceback.print_exc(file=self.stderr) sys.stderr.write(colorize('ERROR: ', fg='red')) self.stderr.write('%s\n' % e) sys.exit(1)
python
def execute(self, *args, **options): """ Executes whole process of parsing arguments, running command and trying to catch errors. """ try: self.handle(*args, **options) except CommandError, e: if options['debug']: try: import ipdb ipdb.set_trace() except ImportError: import pdb pdb.set_trace() sys.stderr.write(colorize('ERROR: ', fg='red')) self.stderr.write('%s\n' % e) sys.exit(1) except Exception, e: if isinstance(e, IOError) and getattr(e, 'errno') == errno.EPIPE: sys.exit(0) if options['debug']: try: import ipdb ipdb.set_trace() except ImportError: import pdb pdb.set_trace() if options.get('traceback'): import traceback self.stderr.write(u'\n'.join(( '=========', 'TRACEBACK', '=========', '', '', ))) traceback.print_exc(file=self.stderr) sys.stderr.write(colorize('ERROR: ', fg='red')) self.stderr.write('%s\n' % e) sys.exit(1)
['def', 'execute', '(', 'self', ',', '*', 'args', ',', '*', '*', 'options', ')', ':', 'try', ':', 'self', '.', 'handle', '(', '*', 'args', ',', '*', '*', 'options', ')', 'except', 'CommandError', ',', 'e', ':', 'if', 'options', '[', "'debug'", ']', ':', 'try', ':', 'import', 'ipdb', 'ipdb', '.', 'set_trace', '(', ')', 'except', 'ImportError', ':', 'import', 'pdb', 'pdb', '.', 'set_trace', '(', ')', 'sys', '.', 'stderr', '.', 'write', '(', 'colorize', '(', "'ERROR: '", ',', 'fg', '=', "'red'", ')', ')', 'self', '.', 'stderr', '.', 'write', '(', "'%s\\n'", '%', 'e', ')', 'sys', '.', 'exit', '(', '1', ')', 'except', 'Exception', ',', 'e', ':', 'if', 'isinstance', '(', 'e', ',', 'IOError', ')', 'and', 'getattr', '(', 'e', ',', "'errno'", ')', '==', 'errno', '.', 'EPIPE', ':', 'sys', '.', 'exit', '(', '0', ')', 'if', 'options', '[', "'debug'", ']', ':', 'try', ':', 'import', 'ipdb', 'ipdb', '.', 'set_trace', '(', ')', 'except', 'ImportError', ':', 'import', 'pdb', 'pdb', '.', 'set_trace', '(', ')', 'if', 'options', '.', 'get', '(', "'traceback'", ')', ':', 'import', 'traceback', 'self', '.', 'stderr', '.', 'write', '(', "u'\\n'", '.', 'join', '(', '(', "'========='", ',', "'TRACEBACK'", ',', "'========='", ',', "''", ',', "''", ',', ')', ')', ')', 'traceback', '.', 'print_exc', '(', 'file', '=', 'self', '.', 'stderr', ')', 'sys', '.', 'stderr', '.', 'write', '(', 'colorize', '(', "'ERROR: '", ',', 'fg', '=', "'red'", ')', ')', 'self', '.', 'stderr', '.', 'write', '(', "'%s\\n'", '%', 'e', ')', 'sys', '.', 'exit', '(', '1', ')']
Executes whole process of parsing arguments, running command and trying to catch errors.
['Executes', 'whole', 'process', 'of', 'parsing', 'arguments', 'running', 'command', 'and', 'trying', 'to', 'catch', 'errors', '.']
train
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/cli.py#L237-L275
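Note that the execute record above uses Python 2 only syntax (except CommandError, e:), so it will not parse on Python 3. A minimal, hedged sketch of the same catch-report-exit pattern in Python 3, with a stand-in CommandError and plain stderr in place of the project's colorize helper:

import sys


class CommandError(Exception):
    """Stand-in for the command error type used in the record above."""


def execute(handle, *args, **options):
    # Run the command; turn known failures into an error message and exit code 1.
    try:
        handle(*args, **options)
    except CommandError as e:  # Python 3 'as' form replaces 'except CommandError, e:'
        sys.stderr.write('ERROR: %s\n' % e)
        sys.exit(1)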
8,834
sci-bots/pygtkhelpers
pygtkhelpers/ui/views/shapes_canvas_view.py
GtkShapesCanvasView.create_ui
def create_ui(self): ''' .. versionchanged:: 0.20 Debounce window expose and resize handlers to improve responsiveness. .. versionchanged:: X.X.X Call debounced `_on_expose_event` handler on _leading_ edge to make UI update more responsive when, e.g., changing window focus. Decrease debounce time to 250 ms. ''' super(GtkShapesCanvasView, self).create_ui() self.widget.set_events(gtk.gdk.BUTTON_PRESS | gtk.gdk.BUTTON_RELEASE | gtk.gdk.BUTTON_MOTION_MASK | gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK) self._dirty_check_timeout_id = gtk.timeout_add(30, self.check_dirty) self.resize = Debounce(self._resize, wait=250) debounced_on_expose_event = Debounce(self._on_expose_event, wait=250, leading=True, trailing=True) self.widget.connect('expose-event', debounced_on_expose_event)
python
def create_ui(self): ''' .. versionchanged:: 0.20 Debounce window expose and resize handlers to improve responsiveness. .. versionchanged:: X.X.X Call debounced `_on_expose_event` handler on _leading_ edge to make UI update more responsive when, e.g., changing window focus. Decrease debounce time to 250 ms. ''' super(GtkShapesCanvasView, self).create_ui() self.widget.set_events(gtk.gdk.BUTTON_PRESS | gtk.gdk.BUTTON_RELEASE | gtk.gdk.BUTTON_MOTION_MASK | gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK) self._dirty_check_timeout_id = gtk.timeout_add(30, self.check_dirty) self.resize = Debounce(self._resize, wait=250) debounced_on_expose_event = Debounce(self._on_expose_event, wait=250, leading=True, trailing=True) self.widget.connect('expose-event', debounced_on_expose_event)
['def', 'create_ui', '(', 'self', ')', ':', 'super', '(', 'GtkShapesCanvasView', ',', 'self', ')', '.', 'create_ui', '(', ')', 'self', '.', 'widget', '.', 'set_events', '(', 'gtk', '.', 'gdk', '.', 'BUTTON_PRESS', '|', 'gtk', '.', 'gdk', '.', 'BUTTON_RELEASE', '|', 'gtk', '.', 'gdk', '.', 'BUTTON_MOTION_MASK', '|', 'gtk', '.', 'gdk', '.', 'BUTTON_PRESS_MASK', '|', 'gtk', '.', 'gdk', '.', 'BUTTON_RELEASE_MASK', '|', 'gtk', '.', 'gdk', '.', 'POINTER_MOTION_HINT_MASK', ')', 'self', '.', '_dirty_check_timeout_id', '=', 'gtk', '.', 'timeout_add', '(', '30', ',', 'self', '.', 'check_dirty', ')', 'self', '.', 'resize', '=', 'Debounce', '(', 'self', '.', '_resize', ',', 'wait', '=', '250', ')', 'debounced_on_expose_event', '=', 'Debounce', '(', 'self', '.', '_on_expose_event', ',', 'wait', '=', '250', ',', 'leading', '=', 'True', ',', 'trailing', '=', 'True', ')', 'self', '.', 'widget', '.', 'connect', '(', "'expose-event'", ',', 'debounced_on_expose_event', ')']
.. versionchanged:: 0.20 Debounce window expose and resize handlers to improve responsiveness. .. versionchanged:: X.X.X Call debounced `_on_expose_event` handler on _leading_ edge to make UI update more responsive when, e.g., changing window focus. Decrease debounce time to 250 ms.
['..', 'versionchanged', '::', '0', '.', '20', 'Debounce', 'window', 'expose', 'and', 'resize', 'handlers', 'to', 'improve', 'responsiveness', '.']
train
https://github.com/sci-bots/pygtkhelpers/blob/3a6e6d6340221c686229cd1c951d7537dae81b07/pygtkhelpers/ui/views/shapes_canvas_view.py#L43-L67
8,835
aio-libs/aioredis
aioredis/sentinel/pool.py
SentinelPool.discover
async def discover(self, timeout=None): # TODO: better name? """Discover sentinels and all monitored services within given timeout. If no sentinels discovered within timeout: TimeoutError is raised. If some sentinels were discovered but not all — it is ok. If not all monitored services (masters/slaves) discovered (or connections established) — it is ok. TBD: what if some sentinels/services unreachable; """ # TODO: check not closed # TODO: discovery must be done with some customizable timeout. if timeout is None: timeout = self.discover_timeout tasks = [] pools = [] for addr in self._sentinels: # iterate over unordered set tasks.append(self._connect_sentinel(addr, timeout, pools)) done, pending = await asyncio.wait(tasks, loop=self._loop, return_when=ALL_COMPLETED) assert not pending, ("Expected all tasks to complete", done, pending) for task in done: result = task.result() if isinstance(result, Exception): continue # FIXME if not pools: raise Exception("Could not connect to any sentinel") pools, self._pools[:] = self._pools[:], pools # TODO: close current connections for pool in pools: pool.close() await pool.wait_closed() # TODO: discover peer sentinels for pool in self._pools: await pool.execute_pubsub( b'psubscribe', self._monitor.pattern('*'))
python
async def discover(self, timeout=None): # TODO: better name? """Discover sentinels and all monitored services within given timeout. If no sentinels discovered within timeout: TimeoutError is raised. If some sentinels were discovered but not all — it is ok. If not all monitored services (masters/slaves) discovered (or connections established) — it is ok. TBD: what if some sentinels/services unreachable; """ # TODO: check not closed # TODO: discovery must be done with some customizable timeout. if timeout is None: timeout = self.discover_timeout tasks = [] pools = [] for addr in self._sentinels: # iterate over unordered set tasks.append(self._connect_sentinel(addr, timeout, pools)) done, pending = await asyncio.wait(tasks, loop=self._loop, return_when=ALL_COMPLETED) assert not pending, ("Expected all tasks to complete", done, pending) for task in done: result = task.result() if isinstance(result, Exception): continue # FIXME if not pools: raise Exception("Could not connect to any sentinel") pools, self._pools[:] = self._pools[:], pools # TODO: close current connections for pool in pools: pool.close() await pool.wait_closed() # TODO: discover peer sentinels for pool in self._pools: await pool.execute_pubsub( b'psubscribe', self._monitor.pattern('*'))
['async', 'def', 'discover', '(', 'self', ',', 'timeout', '=', 'None', ')', ':', '# TODO: better name?', '# TODO: check not closed', '# TODO: discovery must be done with some customizable timeout.', 'if', 'timeout', 'is', 'None', ':', 'timeout', '=', 'self', '.', 'discover_timeout', 'tasks', '=', '[', ']', 'pools', '=', '[', ']', 'for', 'addr', 'in', 'self', '.', '_sentinels', ':', '# iterate over unordered set', 'tasks', '.', 'append', '(', 'self', '.', '_connect_sentinel', '(', 'addr', ',', 'timeout', ',', 'pools', ')', ')', 'done', ',', 'pending', '=', 'await', 'asyncio', '.', 'wait', '(', 'tasks', ',', 'loop', '=', 'self', '.', '_loop', ',', 'return_when', '=', 'ALL_COMPLETED', ')', 'assert', 'not', 'pending', ',', '(', '"Expected all tasks to complete"', ',', 'done', ',', 'pending', ')', 'for', 'task', 'in', 'done', ':', 'result', '=', 'task', '.', 'result', '(', ')', 'if', 'isinstance', '(', 'result', ',', 'Exception', ')', ':', 'continue', '# FIXME', 'if', 'not', 'pools', ':', 'raise', 'Exception', '(', '"Could not connect to any sentinel"', ')', 'pools', ',', 'self', '.', '_pools', '[', ':', ']', '=', 'self', '.', '_pools', '[', ':', ']', ',', 'pools', '# TODO: close current connections', 'for', 'pool', 'in', 'pools', ':', 'pool', '.', 'close', '(', ')', 'await', 'pool', '.', 'wait_closed', '(', ')', '# TODO: discover peer sentinels', 'for', 'pool', 'in', 'self', '.', '_pools', ':', 'await', 'pool', '.', 'execute_pubsub', '(', "b'psubscribe'", ',', 'self', '.', '_monitor', '.', 'pattern', '(', "'*'", ')', ')']
Discover sentinels and all monitored services within given timeout. If no sentinels discovered within timeout: TimeoutError is raised. If some sentinels were discovered but not all — it is ok. If not all monitored services (masters/slaves) discovered (or connections established) — it is ok. TBD: what if some sentinels/services unreachable;
['Discover', 'sentinels', 'and', 'all', 'monitored', 'services', 'within', 'given', 'timeout', '.']
train
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/sentinel/pool.py#L192-L228
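The discover coroutine above fans out one connection attempt per sentinel address and waits for all of them before swapping the pool list. A minimal sketch of that fan-out pattern, using a fake connect coroutine rather than aioredis itself:

import asyncio

async def connect(addr, timeout, pools):
    # Stand-in for _connect_sentinel: pretend only even ports are reachable.
    await asyncio.sleep(0)
    if addr[1] % 2:
        return ConnectionError(addr)   # returned, not raised: failures are collected and skipped
    pools.append(addr)
    return addr

async def discover(addrs, timeout=1.0):
    pools = []
    tasks = [asyncio.ensure_future(connect(a, timeout, pools)) for a in addrs]
    done, pending = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
    assert not pending, "ALL_COMPLETED leaves nothing pending"
    if not pools:
        raise Exception("Could not connect to any sentinel")
    return pools

print(asyncio.run(discover([('s1', 26379), ('s2', 26380)])))   # [('s2', 26380)]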
8,836
gmr/tredis
tredis/sets.py
SetsMixin.sdiffstore
def sdiffstore(self, destination, *keys): """This command is equal to :meth:`~tredis.RedisClient.sdiff`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the total number of elements in all given sets. :param destination: The set to store the diff into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SDIFFSTORE', destination] + list(keys))
python
def sdiffstore(self, destination, *keys): """This command is equal to :meth:`~tredis.RedisClient.sdiff`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the total number of elements in all given sets. :param destination: The set to store the diff into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError` """ return self._execute([b'SDIFFSTORE', destination] + list(keys))
['def', 'sdiffstore', '(', 'self', ',', 'destination', ',', '*', 'keys', ')', ':', 'return', 'self', '.', '_execute', '(', '[', "b'SDIFFSTORE'", ',', 'destination', ']', '+', 'list', '(', 'keys', ')', ')']
This command is equal to :meth:`~tredis.RedisClient.sdiff`, but instead of returning the resulting set, it is stored in destination. If destination already exists, it is overwritten. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the total number of elements in all given sets. :param destination: The set to store the diff into :type destination: :class:`str`, :class:`bytes` :param keys: One or more set keys as positional arguments :type keys: :class:`str`, :class:`bytes` :rtype: int :raises: :exc:`~tredis.exceptions.RedisError`
['This', 'command', 'is', 'equal', 'to', ':', 'meth', ':', '~tredis', '.', 'RedisClient', '.', 'sdiff', 'but', 'instead', 'of', 'returning', 'the', 'resulting', 'set', 'it', 'is', 'stored', 'in', 'destination', '.']
train
https://github.com/gmr/tredis/blob/2e91c6a58a35460be0525c51ac6a98fde3b506ad/tredis/sets.py#L82-L101
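For context on what the stored result contains: SDIFFSTORE writes the difference of the first set against all following sets and returns its cardinality. Plain Python sets model the semantics (illustrative only, not a tredis call):

# Model of SDIFFSTORE semantics with in-memory sets (illustrative only).
store = {
    b'key1': {b'a', b'b', b'c', b'd'},
    b'key2': {b'c'},
    b'key3': {b'a', b'c', b'e'},
}

def sdiffstore(destination, *keys):
    # Difference of the first set against all following sets, stored at destination.
    diff = set(store[keys[0]])
    for key in keys[1:]:
        diff -= store.get(key, set())
    store[destination] = diff
    return len(diff)  # Redis returns the cardinality of the resulting set

print(sdiffstore(b'dest', b'key1', b'key2', b'key3'))  # -> 2, store[b'dest'] == {b'b', b'd'}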
8,837
huge-success/sanic
sanic/response.py
StreamingHTTPResponse.stream
async def stream( self, version="1.1", keep_alive=False, keep_alive_timeout=None ): """Streams headers, runs the `streaming_fn` callback that writes content to the response body, then finalizes the response body. """ headers = self.get_headers( version, keep_alive=keep_alive, keep_alive_timeout=keep_alive_timeout, ) self.protocol.push_data(headers) await self.protocol.drain() await self.streaming_fn(self) self.protocol.push_data(b"0\r\n\r\n")
python
async def stream( self, version="1.1", keep_alive=False, keep_alive_timeout=None ): """Streams headers, runs the `streaming_fn` callback that writes content to the response body, then finalizes the response body. """ headers = self.get_headers( version, keep_alive=keep_alive, keep_alive_timeout=keep_alive_timeout, ) self.protocol.push_data(headers) await self.protocol.drain() await self.streaming_fn(self) self.protocol.push_data(b"0\r\n\r\n")
['async', 'def', 'stream', '(', 'self', ',', 'version', '=', '"1.1"', ',', 'keep_alive', '=', 'False', ',', 'keep_alive_timeout', '=', 'None', ')', ':', 'headers', '=', 'self', '.', 'get_headers', '(', 'version', ',', 'keep_alive', '=', 'keep_alive', ',', 'keep_alive_timeout', '=', 'keep_alive_timeout', ',', ')', 'self', '.', 'protocol', '.', 'push_data', '(', 'headers', ')', 'await', 'self', '.', 'protocol', '.', 'drain', '(', ')', 'await', 'self', '.', 'streaming_fn', '(', 'self', ')', 'self', '.', 'protocol', '.', 'push_data', '(', 'b"0\\r\\n\\r\\n"', ')']
Streams headers, runs the `streaming_fn` callback that writes content to the response body, then finalizes the response body.
['Streams', 'headers', 'runs', 'the', 'streaming_fn', 'callback', 'that', 'writes', 'content', 'to', 'the', 'response', 'body', 'then', 'finalizes', 'the', 'response', 'body', '.']
train
https://github.com/huge-success/sanic/blob/6a4a3f617fdbe1d3ee8bdc9d1b12ad2d0b34acdd/sanic/response.py#L85-L99
8,838
ottogroup/palladium
palladium/server.py
PredictStream.listen
def listen(self, io_in, io_out, io_err): """Listens to provided io stream and writes predictions to output. In case of errors, the error stream will be used. """ for line in io_in: if line.strip().lower() == 'exit': break try: y_pred = self.process_line(line) except Exception as e: io_out.write('[]\n') io_err.write( "Error while processing input row: {}" "{}: {}\n".format(line, type(e), e)) io_err.flush() else: io_out.write(ujson.dumps(y_pred.tolist())) io_out.write('\n') io_out.flush()
python
def listen(self, io_in, io_out, io_err): """Listens to provided io stream and writes predictions to output. In case of errors, the error stream will be used. """ for line in io_in: if line.strip().lower() == 'exit': break try: y_pred = self.process_line(line) except Exception as e: io_out.write('[]\n') io_err.write( "Error while processing input row: {}" "{}: {}\n".format(line, type(e), e)) io_err.flush() else: io_out.write(ujson.dumps(y_pred.tolist())) io_out.write('\n') io_out.flush()
['def', 'listen', '(', 'self', ',', 'io_in', ',', 'io_out', ',', 'io_err', ')', ':', 'for', 'line', 'in', 'io_in', ':', 'if', 'line', '.', 'strip', '(', ')', '.', 'lower', '(', ')', '==', "'exit'", ':', 'break', 'try', ':', 'y_pred', '=', 'self', '.', 'process_line', '(', 'line', ')', 'except', 'Exception', 'as', 'e', ':', 'io_out', '.', 'write', '(', "'[]\\n'", ')', 'io_err', '.', 'write', '(', '"Error while processing input row: {}"', '"{}: {}\\n"', '.', 'format', '(', 'line', ',', 'type', '(', 'e', ')', ',', 'e', ')', ')', 'io_err', '.', 'flush', '(', ')', 'else', ':', 'io_out', '.', 'write', '(', 'ujson', '.', 'dumps', '(', 'y_pred', '.', 'tolist', '(', ')', ')', ')', 'io_out', '.', 'write', '(', "'\\n'", ')', 'io_out', '.', 'flush', '(', ')']
Listens to provided io stream and writes predictions to output. In case of errors, the error stream will be used.
['Listens', 'to', 'provided', 'io', 'stream', 'and', 'writes', 'predictions', 'to', 'output', '.', 'In', 'case', 'of', 'errors', 'the', 'error', 'stream', 'will', 'be', 'used', '.']
train
https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L340-L359
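The listen loop above is a plain line-driven protocol: one prediction list per input row, '[]' plus a message on the error stream when a row fails, and 'exit' to stop. A simplified stand-in (not palladium's class, and stdlib json in place of ujson) driven with in-memory streams:

import io
import json

def listen(io_in, io_out, io_err, process_line):
    # Simplified stand-in for the loop above: one JSON prediction list per input row.
    for line in io_in:
        if line.strip().lower() == 'exit':
            break
        try:
            y_pred = process_line(line)
        except Exception as e:
            io_out.write('[]\n')
            io_err.write('Error while processing input row: %s%s: %s\n' % (line, type(e), e))
        else:
            io_out.write(json.dumps(y_pred) + '\n')

io_in = io.StringIO('1,2,3\nnot,numbers\nexit\n')
io_out, io_err = io.StringIO(), io.StringIO()
listen(io_in, io_out, io_err, lambda line: [sum(float(x) for x in line.split(','))])
print(io_out.getvalue())   # '[6.0]\n[]\n'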
8,839
kwikteam/phy
phy/gui/widgets.py
HTMLWidget.build
def build(self): """Build the full HTML source.""" if self.is_built(): # pragma: no cover return with _wait_signal(self.loadFinished, 20): self.rebuild() self._built = True
python
def build(self): """Build the full HTML source.""" if self.is_built(): # pragma: no cover return with _wait_signal(self.loadFinished, 20): self.rebuild() self._built = True
['def', 'build', '(', 'self', ')', ':', 'if', 'self', '.', 'is_built', '(', ')', ':', '# pragma: no cover', 'return', 'with', '_wait_signal', '(', 'self', '.', 'loadFinished', ',', '20', ')', ':', 'self', '.', 'rebuild', '(', ')', 'self', '.', '_built', '=', 'True']
Build the full HTML source.
['Build', 'the', 'full', 'HTML', 'source', '.']
train
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/gui/widgets.py#L171-L177
8,840
jfilter/text-classification-keras
texcla/embeddings.py
build_embedding_weights
def build_embedding_weights(word_index, embeddings_index): """Builds an embedding matrix for all words in vocab using embeddings_index """ logger.info('Loading embeddings for all words in the corpus') embedding_dim = list(embeddings_index.values())[0].shape[-1] # setting special tokens such as UNK and PAD to 0 # all other words are also set to 0. embedding_weights = np.zeros((len(word_index), embedding_dim)) for word, i in word_index.items(): word_vector = embeddings_index.get(word) if word_vector is not None: embedding_weights[i] = word_vector return embedding_weights
python
def build_embedding_weights(word_index, embeddings_index): """Builds an embedding matrix for all words in vocab using embeddings_index """ logger.info('Loading embeddings for all words in the corpus') embedding_dim = list(embeddings_index.values())[0].shape[-1] # setting special tokens such as UNK and PAD to 0 # all other words are also set to 0. embedding_weights = np.zeros((len(word_index), embedding_dim)) for word, i in word_index.items(): word_vector = embeddings_index.get(word) if word_vector is not None: embedding_weights[i] = word_vector return embedding_weights
['def', 'build_embedding_weights', '(', 'word_index', ',', 'embeddings_index', ')', ':', 'logger', '.', 'info', '(', "'Loading embeddings for all words in the corpus'", ')', 'embedding_dim', '=', 'list', '(', 'embeddings_index', '.', 'values', '(', ')', ')', '[', '0', ']', '.', 'shape', '[', '-', '1', ']', '# setting special tokens such as UNK and PAD to 0', '# all other words are also set to 0.', 'embedding_weights', '=', 'np', '.', 'zeros', '(', '(', 'len', '(', 'word_index', ')', ',', 'embedding_dim', ')', ')', 'for', 'word', ',', 'i', 'in', 'word_index', '.', 'items', '(', ')', ':', 'word_vector', '=', 'embeddings_index', '.', 'get', '(', 'word', ')', 'if', 'word_vector', 'is', 'not', 'None', ':', 'embedding_weights', '[', 'i', ']', '=', 'word_vector', 'return', 'embedding_weights']
Builds an embedding matrix for all words in vocab using embeddings_index
['Builds', 'an', 'embedding', 'matrix', 'for', 'all', 'words', 'in', 'vocab', 'using', 'embeddings_index']
train
https://github.com/jfilter/text-classification-keras/blob/a59c652805da41d18937c7fdad0d9fd943cf8578/texcla/embeddings.py#L146-L161
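A toy run of the weight-building idea above, assuming a made-up four-word vocabulary and two pretrained vectors; rows for padding and out-of-vocabulary words stay zero:

import numpy as np

# Toy inputs: index 0 is reserved for padding/unknown tokens and stays all-zero.
word_index = {'<PAD>': 0, 'cat': 1, 'dog': 2, 'xyzzy': 3}
embeddings_index = {'cat': np.array([0.1, 0.2]), 'dog': np.array([0.3, 0.4])}

embedding_dim = list(embeddings_index.values())[0].shape[-1]
weights = np.zeros((len(word_index), embedding_dim))
for word, i in word_index.items():
    vec = embeddings_index.get(word)
    if vec is not None:
        weights[i] = vec

print(weights.shape)   # (4, 2); row 3 stays zero because 'xyzzy' is out of vocabulary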
8,841
wandb/client
wandb/__init__.py
reset_env
def reset_env(exclude=[]): """Remove environment variables, used in Jupyter notebooks""" if os.getenv(env.INITED): wandb_keys = [key for key in os.environ.keys() if key.startswith( 'WANDB_') and key not in exclude] for key in wandb_keys: del os.environ[key] return True else: return False
python
def reset_env(exclude=[]): """Remove environment variables, used in Jupyter notebooks""" if os.getenv(env.INITED): wandb_keys = [key for key in os.environ.keys() if key.startswith( 'WANDB_') and key not in exclude] for key in wandb_keys: del os.environ[key] return True else: return False
['def', 'reset_env', '(', 'exclude', '=', '[', ']', ')', ':', 'if', 'os', '.', 'getenv', '(', 'env', '.', 'INITED', ')', ':', 'wandb_keys', '=', '[', 'key', 'for', 'key', 'in', 'os', '.', 'environ', '.', 'keys', '(', ')', 'if', 'key', '.', 'startswith', '(', "'WANDB_'", ')', 'and', 'key', 'not', 'in', 'exclude', ']', 'for', 'key', 'in', 'wandb_keys', ':', 'del', 'os', '.', 'environ', '[', 'key', ']', 'return', 'True', 'else', ':', 'return', 'False']
Remove environment variables, used in Jupyter notebooks
['Remove', 'environment', 'variables', 'used', 'in', 'Jupyter', 'notebooks']
train
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/__init__.py#L498-L507
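The environment-reset pattern itself is ordinary os.environ filtering; a small illustrative sketch with a hypothetical prefix helper (it skips wandb's guard on the INITED flag):

import os

# Illustrative only: clear prefixed environment variables, keeping an exclude list.
os.environ['WANDB_MODE'] = 'offline'
os.environ['WANDB_PROJECT'] = 'demo'

def reset_prefixed_env(prefix='WANDB_', exclude=()):
    keys = [k for k in os.environ if k.startswith(prefix) and k not in exclude]
    for k in keys:
        del os.environ[k]
    return bool(keys)

print(reset_prefixed_env(exclude=['WANDB_PROJECT']))      # True
print([k for k in os.environ if k.startswith('WANDB_')])  # ['WANDB_PROJECT']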
8,842
google/importlab
importlab/resolve.py
Resolver.resolve_import
def resolve_import(self, item): """Simulate how Python resolves imports. Returns the filename of the source file Python would load when processing a statement like 'import name' in the module we're currently under. Args: item: An instance of ImportItem Returns: A filename Raises: ImportException: If the module doesn't exist. """ name = item.name # The last part in `from a.b.c import d` might be a symbol rather than a # module, so we try a.b.c and a.b.c.d as names. short_name = None if item.is_from and not item.is_star: if '.' in name.lstrip('.'): # The name is something like `a.b.c`, so strip off `.c`. rindex = name.rfind('.') else: # The name is something like `..c`, so strip off just `c`. rindex = name.rfind('.') + 1 short_name = name[:rindex] if import_finder.is_builtin(name): filename = name + '.so' return Builtin(filename, name) filename, level = convert_to_path(name) if level: # This is a relative import; we need to resolve the filename # relative to the importing file path. filename = os.path.normpath( os.path.join(self.current_directory, filename)) files = [(name, filename)] if short_name: short_filename = os.path.dirname(filename) files.append((short_name, short_filename)) for module_name, path in files: for fs in self.fs_path: f = self._find_file(fs, path) if not f or f == self.current_module.path: # We cannot import a file from itself. continue if item.is_relative(): package_name = self.current_module.package_name if package_name is None: # Relative import in non-package raise ImportException(name) module_name = get_absolute_name(package_name, module_name) if isinstance(self.current_module, System): return System(f, module_name) return Local(f, module_name, fs) # If the module isn't found in the explicit pythonpath, see if python # itself resolved it. if item.source: prefix, ext = os.path.splitext(item.source) mod_name = name # We need to check for importing a symbol here too. if short_name: mod = prefix.replace(os.path.sep, '.') mod = utils.strip_suffix(mod, '.__init__') if not mod.endswith(name) and mod.endswith(short_name): mod_name = short_name if ext == '.pyc': pyfile = prefix + '.py' if os.path.exists(pyfile): return System(pyfile, mod_name) elif not ext: pyfile = os.path.join(prefix, "__init__.py") if os.path.exists(pyfile): return System(pyfile, mod_name) return System(item.source, mod_name) raise ImportException(name)
python
def resolve_import(self, item): """Simulate how Python resolves imports. Returns the filename of the source file Python would load when processing a statement like 'import name' in the module we're currently under. Args: item: An instance of ImportItem Returns: A filename Raises: ImportException: If the module doesn't exist. """ name = item.name # The last part in `from a.b.c import d` might be a symbol rather than a # module, so we try a.b.c and a.b.c.d as names. short_name = None if item.is_from and not item.is_star: if '.' in name.lstrip('.'): # The name is something like `a.b.c`, so strip off `.c`. rindex = name.rfind('.') else: # The name is something like `..c`, so strip off just `c`. rindex = name.rfind('.') + 1 short_name = name[:rindex] if import_finder.is_builtin(name): filename = name + '.so' return Builtin(filename, name) filename, level = convert_to_path(name) if level: # This is a relative import; we need to resolve the filename # relative to the importing file path. filename = os.path.normpath( os.path.join(self.current_directory, filename)) files = [(name, filename)] if short_name: short_filename = os.path.dirname(filename) files.append((short_name, short_filename)) for module_name, path in files: for fs in self.fs_path: f = self._find_file(fs, path) if not f or f == self.current_module.path: # We cannot import a file from itself. continue if item.is_relative(): package_name = self.current_module.package_name if package_name is None: # Relative import in non-package raise ImportException(name) module_name = get_absolute_name(package_name, module_name) if isinstance(self.current_module, System): return System(f, module_name) return Local(f, module_name, fs) # If the module isn't found in the explicit pythonpath, see if python # itself resolved it. if item.source: prefix, ext = os.path.splitext(item.source) mod_name = name # We need to check for importing a symbol here too. if short_name: mod = prefix.replace(os.path.sep, '.') mod = utils.strip_suffix(mod, '.__init__') if not mod.endswith(name) and mod.endswith(short_name): mod_name = short_name if ext == '.pyc': pyfile = prefix + '.py' if os.path.exists(pyfile): return System(pyfile, mod_name) elif not ext: pyfile = os.path.join(prefix, "__init__.py") if os.path.exists(pyfile): return System(pyfile, mod_name) return System(item.source, mod_name) raise ImportException(name)
['def', 'resolve_import', '(', 'self', ',', 'item', ')', ':', 'name', '=', 'item', '.', 'name', '# The last part in `from a.b.c import d` might be a symbol rather than a', '# module, so we try a.b.c and a.b.c.d as names.', 'short_name', '=', 'None', 'if', 'item', '.', 'is_from', 'and', 'not', 'item', '.', 'is_star', ':', 'if', "'.'", 'in', 'name', '.', 'lstrip', '(', "'.'", ')', ':', '# The name is something like `a.b.c`, so strip off `.c`.', 'rindex', '=', 'name', '.', 'rfind', '(', "'.'", ')', 'else', ':', '# The name is something like `..c`, so strip off just `c`.', 'rindex', '=', 'name', '.', 'rfind', '(', "'.'", ')', '+', '1', 'short_name', '=', 'name', '[', ':', 'rindex', ']', 'if', 'import_finder', '.', 'is_builtin', '(', 'name', ')', ':', 'filename', '=', 'name', '+', "'.so'", 'return', 'Builtin', '(', 'filename', ',', 'name', ')', 'filename', ',', 'level', '=', 'convert_to_path', '(', 'name', ')', 'if', 'level', ':', '# This is a relative import; we need to resolve the filename', '# relative to the importing file path.', 'filename', '=', 'os', '.', 'path', '.', 'normpath', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'current_directory', ',', 'filename', ')', ')', 'files', '=', '[', '(', 'name', ',', 'filename', ')', ']', 'if', 'short_name', ':', 'short_filename', '=', 'os', '.', 'path', '.', 'dirname', '(', 'filename', ')', 'files', '.', 'append', '(', '(', 'short_name', ',', 'short_filename', ')', ')', 'for', 'module_name', ',', 'path', 'in', 'files', ':', 'for', 'fs', 'in', 'self', '.', 'fs_path', ':', 'f', '=', 'self', '.', '_find_file', '(', 'fs', ',', 'path', ')', 'if', 'not', 'f', 'or', 'f', '==', 'self', '.', 'current_module', '.', 'path', ':', '# We cannot import a file from itself.', 'continue', 'if', 'item', '.', 'is_relative', '(', ')', ':', 'package_name', '=', 'self', '.', 'current_module', '.', 'package_name', 'if', 'package_name', 'is', 'None', ':', '# Relative import in non-package', 'raise', 'ImportException', '(', 'name', ')', 'module_name', '=', 'get_absolute_name', '(', 'package_name', ',', 'module_name', ')', 'if', 'isinstance', '(', 'self', '.', 'current_module', ',', 'System', ')', ':', 'return', 'System', '(', 'f', ',', 'module_name', ')', 'return', 'Local', '(', 'f', ',', 'module_name', ',', 'fs', ')', "# If the module isn't found in the explicit pythonpath, see if python", '# itself resolved it.', 'if', 'item', '.', 'source', ':', 'prefix', ',', 'ext', '=', 'os', '.', 'path', '.', 'splitext', '(', 'item', '.', 'source', ')', 'mod_name', '=', 'name', '# We need to check for importing a symbol here too.', 'if', 'short_name', ':', 'mod', '=', 'prefix', '.', 'replace', '(', 'os', '.', 'path', '.', 'sep', ',', "'.'", ')', 'mod', '=', 'utils', '.', 'strip_suffix', '(', 'mod', ',', "'.__init__'", ')', 'if', 'not', 'mod', '.', 'endswith', '(', 'name', ')', 'and', 'mod', '.', 'endswith', '(', 'short_name', ')', ':', 'mod_name', '=', 'short_name', 'if', 'ext', '==', "'.pyc'", ':', 'pyfile', '=', 'prefix', '+', "'.py'", 'if', 'os', '.', 'path', '.', 'exists', '(', 'pyfile', ')', ':', 'return', 'System', '(', 'pyfile', ',', 'mod_name', ')', 'elif', 'not', 'ext', ':', 'pyfile', '=', 'os', '.', 'path', '.', 'join', '(', 'prefix', ',', '"__init__.py"', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'pyfile', ')', ':', 'return', 'System', '(', 'pyfile', ',', 'mod_name', ')', 'return', 'System', '(', 'item', '.', 'source', ',', 'mod_name', ')', 'raise', 'ImportException', '(', 'name', ')']
Simulate how Python resolves imports. Returns the filename of the source file Python would load when processing a statement like 'import name' in the module we're currently under. Args: item: An instance of ImportItem Returns: A filename Raises: ImportException: If the module doesn't exist.
['Simulate', 'how', 'Python', 'resolves', 'imports', '.']
train
https://github.com/google/importlab/blob/92090a0b4421137d1369c2ed952eda6bb4c7a155/importlab/resolve.py#L150-L233
8,843
casacore/python-casacore
casacore/fitting/fitting.py
fitserver.functional
def functional(self, fnct, x, y, sd=None, wt=1.0, mxit=50, fid=0): """Make a non-linear least squares solution. This will make a non-linear least squares solution for the points through the ordinates at the abscissa values, using the specified `fnct`. Details can be found in the :meth:`linear` description. :param fnct: the functional to fit :param x: the abscissa values :param y: the ordinate values :param sd: standard deviation of equations (one or more values used cyclically) :param wt: an optional alternate for `sd` :param mxit: the maximum number of iterations :param fid: the id of the sub-fitter (numerical) """ self._fit(fitfunc="functional", fnct=fnct, x=x, y=y, sd=sd, wt=wt, mxit=mxit, fid=fid)
python
def functional(self, fnct, x, y, sd=None, wt=1.0, mxit=50, fid=0): """Make a non-linear least squares solution. This will make a non-linear least squares solution for the points through the ordinates at the abscissa values, using the specified `fnct`. Details can be found in the :meth:`linear` description. :param fnct: the functional to fit :param x: the abscissa values :param y: the ordinate values :param sd: standard deviation of equations (one or more values used cyclically) :param wt: an optional alternate for `sd` :param mxit: the maximum number of iterations :param fid: the id of the sub-fitter (numerical) """ self._fit(fitfunc="functional", fnct=fnct, x=x, y=y, sd=sd, wt=wt, mxit=mxit, fid=fid)
['def', 'functional', '(', 'self', ',', 'fnct', ',', 'x', ',', 'y', ',', 'sd', '=', 'None', ',', 'wt', '=', '1.0', ',', 'mxit', '=', '50', ',', 'fid', '=', '0', ')', ':', 'self', '.', '_fit', '(', 'fitfunc', '=', '"functional"', ',', 'fnct', '=', 'fnct', ',', 'x', '=', 'x', ',', 'y', '=', 'y', ',', 'sd', '=', 'sd', ',', 'wt', '=', 'wt', ',', 'mxit', '=', 'mxit', ',', 'fid', '=', 'fid', ')']
Make a non-linear least squares solution. This will make a non-linear least squares solution for the points through the ordinates at the abscissa values, using the specified `fnct`. Details can be found in the :meth:`linear` description. :param fnct: the functional to fit :param x: the abscissa values :param y: the ordinate values :param sd: standard deviation of equations (one or more values used cyclically) :param wt: an optional alternate for `sd` :param mxit: the maximum number of iterations :param fid: the id of the sub-fitter (numerical)
['Make', 'a', 'non', '-', 'linear', 'least', 'squares', 'solution', '.']
train
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/fitting/fitting.py#L333-L351
8,844
genepattern/genepattern-python
gp/data.py
_bytes_to_str
def _bytes_to_str(lines): """ Convert all lines from byte string to unicode string, if necessary """ if len(lines) >= 1 and hasattr(lines[0], 'decode'): return [line.decode('utf-8') for line in lines] else: return lines
python
def _bytes_to_str(lines): """ Convert all lines from byte string to unicode string, if necessary """ if len(lines) >= 1 and hasattr(lines[0], 'decode'): return [line.decode('utf-8') for line in lines] else: return lines
['def', '_bytes_to_str', '(', 'lines', ')', ':', 'if', 'len', '(', 'lines', ')', '>=', '1', 'and', 'hasattr', '(', 'lines', '[', '0', ']', ',', "'decode'", ')', ':', 'return', '[', 'line', '.', 'decode', '(', "'utf-8'", ')', 'for', 'line', 'in', 'lines', ']', 'else', ':', 'return', 'lines']
Convert all lines from byte string to unicode string, if necessary
['Convert', 'all', 'lines', 'from', 'byte', 'string', 'to', 'unicode', 'string', 'if', 'necessary']
train
https://github.com/genepattern/genepattern-python/blob/9478ea65362b91c72a94f7300c3de8d710bebb71/gp/data.py#L263-L270
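A quick check of the byte/unicode normalization above, using made-up tab-separated lines:

lines_bytes = [b'gene\tvalue', b'TP53\t1.2']
lines_text = ['gene\tvalue', 'TP53\t1.2']

def bytes_to_str(lines):
    # Decode only when the elements look like byte strings.
    if len(lines) >= 1 and hasattr(lines[0], 'decode'):
        return [line.decode('utf-8') for line in lines]
    return lines

print(bytes_to_str(lines_bytes) == lines_text)  # True
print(bytes_to_str(lines_text) is lines_text)   # True (already unicode, returned as-is)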
8,845
pypa/setuptools
setuptools/command/egg_info.py
FileList.global_include
def global_include(self, pattern): """ Include all files anywhere in the current directory that match the pattern. This is very inefficient on large file trees. """ if self.allfiles is None: self.findall() match = translate_pattern(os.path.join('**', pattern)) found = [f for f in self.allfiles if match.match(f)] self.extend(found) return bool(found)
python
def global_include(self, pattern): """ Include all files anywhere in the current directory that match the pattern. This is very inefficient on large file trees. """ if self.allfiles is None: self.findall() match = translate_pattern(os.path.join('**', pattern)) found = [f for f in self.allfiles if match.match(f)] self.extend(found) return bool(found)
['def', 'global_include', '(', 'self', ',', 'pattern', ')', ':', 'if', 'self', '.', 'allfiles', 'is', 'None', ':', 'self', '.', 'findall', '(', ')', 'match', '=', 'translate_pattern', '(', 'os', '.', 'path', '.', 'join', '(', "'**'", ',', 'pattern', ')', ')', 'found', '=', '[', 'f', 'for', 'f', 'in', 'self', '.', 'allfiles', 'if', 'match', '.', 'match', '(', 'f', ')', ']', 'self', '.', 'extend', '(', 'found', ')', 'return', 'bool', '(', 'found', ')']
Include all files anywhere in the current directory that match the pattern. This is very inefficient on large file trees.
['Include', 'all', 'files', 'anywhere', 'in', 'the', 'current', 'directory', 'that', 'match', 'the', 'pattern', '.', 'This', 'is', 'very', 'inefficient', 'on', 'large', 'file', 'trees', '.']
train
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/command/egg_info.py#L454-L464
8,846
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
_validate_edges_do_not_have_extra_links
def _validate_edges_do_not_have_extra_links(class_name, properties): """Validate that edges do not have properties of Link type that aren't the edge endpoints.""" for property_name, property_descriptor in six.iteritems(properties): if property_name in {EDGE_SOURCE_PROPERTY_NAME, EDGE_DESTINATION_PROPERTY_NAME}: continue if property_descriptor.type_id == PROPERTY_TYPE_LINK_ID: raise IllegalSchemaStateError(u'Edge class "{}" has a property of type Link that is ' u'not an edge endpoint, this is not allowed: ' u'{}'.format(class_name, property_name))
python
def _validate_edges_do_not_have_extra_links(class_name, properties): """Validate that edges do not have properties of Link type that aren't the edge endpoints.""" for property_name, property_descriptor in six.iteritems(properties): if property_name in {EDGE_SOURCE_PROPERTY_NAME, EDGE_DESTINATION_PROPERTY_NAME}: continue if property_descriptor.type_id == PROPERTY_TYPE_LINK_ID: raise IllegalSchemaStateError(u'Edge class "{}" has a property of type Link that is ' u'not an edge endpoint, this is not allowed: ' u'{}'.format(class_name, property_name))
['def', '_validate_edges_do_not_have_extra_links', '(', 'class_name', ',', 'properties', ')', ':', 'for', 'property_name', ',', 'property_descriptor', 'in', 'six', '.', 'iteritems', '(', 'properties', ')', ':', 'if', 'property_name', 'in', '{', 'EDGE_SOURCE_PROPERTY_NAME', ',', 'EDGE_DESTINATION_PROPERTY_NAME', '}', ':', 'continue', 'if', 'property_descriptor', '.', 'type_id', '==', 'PROPERTY_TYPE_LINK_ID', ':', 'raise', 'IllegalSchemaStateError', '(', 'u\'Edge class "{}" has a property of type Link that is \'', "u'not an edge endpoint, this is not allowed: '", "u'{}'", '.', 'format', '(', 'class_name', ',', 'property_name', ')', ')']
Validate that edges do not have properties of Link type that aren't the edge endpoints.
['Validate', 'that', 'edges', 'do', 'not', 'have', 'properties', 'of', 'Link', 'type', 'that', 'aren', 't', 'the', 'edge', 'endpoints', '.']
train
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L44-L53
8,847
maceoutliner/django-fiction-outlines
fiction_outlines/receivers.py
validate_generations_for_story_elements
def validate_generations_for_story_elements( sender, instance, action, target_node_type=None, target_node=None, pos=None, *args, **kwargs ): ''' Unlike arc nodes, for which we just warn about structure, the story tree allowed parent/child rules must be strictly enforced. ''' if action == 'add_child': if instance.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, instance.story_element_type))) if action == 'update': parent = instance.get_parent() children = instance.get_children() if parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, parent.story_element_type))) if children: for child in children: if target_node_type not in STORY_NODE_ELEMENT_DEFINITIONS[child.story_element_type]['allowed_parents']: raise IntegrityError(_('%s is not permitted to be a parent of %s' % ( target_node_type, child.story_element_type))) if action == 'add_sibling': parent = instance.get_parent() if parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, parent.story_element_type))) if action == 'move': if not pos or 'sibling' in pos or 'right' in pos or 'left' in pos: parent = target_node.get_parent() if (parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[instance.story_element_type]['allowed_parents']): raise IntegrityError(_('%s is not an allowed child of %s' % ( instance.story_element_type, parent.story_element_type ))) if 'child' in pos: if (target_node.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[instance.story_element_type]['allowed_parents']): raise IntegrityError(_('%s is not an allowed child of %s' % ( instance.story_element_type, target_node.story_element_type )))
python
def validate_generations_for_story_elements( sender, instance, action, target_node_type=None, target_node=None, pos=None, *args, **kwargs ): ''' Unlike arc nodes, for which we just warn about structure, the story tree allowed parent/child rules must be strictly enforced. ''' if action == 'add_child': if instance.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, instance.story_element_type))) if action == 'update': parent = instance.get_parent() children = instance.get_children() if parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, parent.story_element_type))) if children: for child in children: if target_node_type not in STORY_NODE_ELEMENT_DEFINITIONS[child.story_element_type]['allowed_parents']: raise IntegrityError(_('%s is not permitted to be a parent of %s' % ( target_node_type, child.story_element_type))) if action == 'add_sibling': parent = instance.get_parent() if parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[target_node_type]['allowed_parents']: raise IntegrityError(_('%s is not an allowed child of %s' % (target_node_type, parent.story_element_type))) if action == 'move': if not pos or 'sibling' in pos or 'right' in pos or 'left' in pos: parent = target_node.get_parent() if (parent.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[instance.story_element_type]['allowed_parents']): raise IntegrityError(_('%s is not an allowed child of %s' % ( instance.story_element_type, parent.story_element_type ))) if 'child' in pos: if (target_node.story_element_type not in STORY_NODE_ELEMENT_DEFINITIONS[instance.story_element_type]['allowed_parents']): raise IntegrityError(_('%s is not an allowed child of %s' % ( instance.story_element_type, target_node.story_element_type )))
['def', 'validate_generations_for_story_elements', '(', 'sender', ',', 'instance', ',', 'action', ',', 'target_node_type', '=', 'None', ',', 'target_node', '=', 'None', ',', 'pos', '=', 'None', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'action', '==', "'add_child'", ':', 'if', 'instance', '.', 'story_element_type', 'not', 'in', 'STORY_NODE_ELEMENT_DEFINITIONS', '[', 'target_node_type', ']', '[', "'allowed_parents'", ']', ':', 'raise', 'IntegrityError', '(', '_', '(', "'%s is not an allowed child of %s'", '%', '(', 'target_node_type', ',', 'instance', '.', 'story_element_type', ')', ')', ')', 'if', 'action', '==', "'update'", ':', 'parent', '=', 'instance', '.', 'get_parent', '(', ')', 'children', '=', 'instance', '.', 'get_children', '(', ')', 'if', 'parent', '.', 'story_element_type', 'not', 'in', 'STORY_NODE_ELEMENT_DEFINITIONS', '[', 'target_node_type', ']', '[', "'allowed_parents'", ']', ':', 'raise', 'IntegrityError', '(', '_', '(', "'%s is not an allowed child of %s'", '%', '(', 'target_node_type', ',', 'parent', '.', 'story_element_type', ')', ')', ')', 'if', 'children', ':', 'for', 'child', 'in', 'children', ':', 'if', 'target_node_type', 'not', 'in', 'STORY_NODE_ELEMENT_DEFINITIONS', '[', 'child', '.', 'story_element_type', ']', '[', "'allowed_parents'", ']', ':', 'raise', 'IntegrityError', '(', '_', '(', "'%s is not permitted to be a parent of %s'", '%', '(', 'target_node_type', ',', 'child', '.', 'story_element_type', ')', ')', ')', 'if', 'action', '==', "'add_sibling'", ':', 'parent', '=', 'instance', '.', 'get_parent', '(', ')', 'if', 'parent', '.', 'story_element_type', 'not', 'in', 'STORY_NODE_ELEMENT_DEFINITIONS', '[', 'target_node_type', ']', '[', "'allowed_parents'", ']', ':', 'raise', 'IntegrityError', '(', '_', '(', "'%s is not an allowed child of %s'", '%', '(', 'target_node_type', ',', 'parent', '.', 'story_element_type', ')', ')', ')', 'if', 'action', '==', "'move'", ':', 'if', 'not', 'pos', 'or', "'sibling'", 'in', 'pos', 'or', "'right'", 'in', 'pos', 'or', "'left'", 'in', 'pos', ':', 'parent', '=', 'target_node', '.', 'get_parent', '(', ')', 'if', '(', 'parent', '.', 'story_element_type', 'not', 'in', 'STORY_NODE_ELEMENT_DEFINITIONS', '[', 'instance', '.', 'story_element_type', ']', '[', "'allowed_parents'", ']', ')', ':', 'raise', 'IntegrityError', '(', '_', '(', "'%s is not an allowed child of %s'", '%', '(', 'instance', '.', 'story_element_type', ',', 'parent', '.', 'story_element_type', ')', ')', ')', 'if', "'child'", 'in', 'pos', ':', 'if', '(', 'target_node', '.', 'story_element_type', 'not', 'in', 'STORY_NODE_ELEMENT_DEFINITIONS', '[', 'instance', '.', 'story_element_type', ']', '[', "'allowed_parents'", ']', ')', ':', 'raise', 'IntegrityError', '(', '_', '(', "'%s is not an allowed child of %s'", '%', '(', 'instance', '.', 'story_element_type', ',', 'target_node', '.', 'story_element_type', ')', ')', ')']
Unlike arc nodes, for which we just warn about structure, the story tree allowed parent/child rules must be strictly enforced.
['Unlike', 'arc', 'nodes', 'for', 'which', 'we', 'just', 'warn', 'about', 'structure', 'the', 'story', 'tree', 'allowed', 'parent', '/', 'child', 'rules', 'must', 'be', 'strictly', 'enforced', '.']
train
https://github.com/maceoutliner/django-fiction-outlines/blob/6c58e356af3fbe7b23557643ba27e46eaef9d4e3/fiction_outlines/receivers.py#L206-L253
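A minimal sketch of the allowed-parents membership check this receiver enforces, using a toy definitions mapping; the element type names and the simplified helper below are illustrative assumptions, not part of django-fiction-outlines.

# Illustrative only: a trimmed-down stand-in for a STORY_NODE_ELEMENT_DEFINITIONS-style dict.
NODE_DEFS = {
    'part': {'allowed_parents': [None]},        # hypothetical element types
    'chapter': {'allowed_parents': ['part']},
    'scene': {'allowed_parents': ['chapter']},
}

def check_allowed(child_type, parent_type):
    # Mirrors the membership test used above before raising IntegrityError.
    if parent_type not in NODE_DEFS[child_type]['allowed_parents']:
        raise ValueError('%s is not an allowed child of %s' % (child_type, parent_type))

check_allowed('scene', 'chapter')   # passes silently
# check_allowed('scene', 'part')    # would raise ValueError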
8,848
zagfai/webtul
webtul/task.py
Task.get
def get(self, timeout=10):
    """get() -> {'id': 32-byte-md5, 'body': msg-body}"""
    req = self.req({'op': 'GET', 'timeout': timeout})
    if req.status_code != 200:
        return None
    result = req.json()
    if result.get('status') != 'ok':
        return False
    return result
python
def get(self, timeout=10):
    """get() -> {'id': 32-byte-md5, 'body': msg-body}"""
    req = self.req({'op': 'GET', 'timeout': timeout})
    if req.status_code != 200:
        return None
    result = req.json()
    if result.get('status') != 'ok':
        return False
    return result
['def', 'get', '(', 'self', ',', 'timeout', '=', '10', ')', ':', 'req', '=', 'self', '.', 'req', '(', '{', "'op'", ':', "'GET'", ',', "'timeout'", ':', 'timeout', '}', ')', 'if', 'req', '.', 'status_code', '!=', '200', ':', 'return', 'None', 'result', '=', 'req', '.', 'json', '(', ')', 'if', 'result', '.', 'get', '(', "'status'", ')', '!=', "'ok'", ':', 'return', 'False', 'return', 'result']
get() -> {'id': 32-byte-md5, 'body': msg-body}
['get', '()', '-', '>', '{', 'id', ':', '32', '-', 'byte', '-', 'md5', 'body', ':', 'msg', '-', 'body', '}']
train
https://github.com/zagfai/webtul/blob/58c49928070b56ef54a45b4af20d800b269ad8ce/webtul/task.py#L192-L200
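get() returns one of three shapes: None on a non-200 response, False when the server answers but the status is not "ok", and the message dict otherwise. A pure-Python sketch of handling that tri-state result; how a Task instance is constructed is not shown in this record, so only the result handling is illustrated.

def handle(msg):
    # None: transport-level failure; False: server replied but status != 'ok'.
    if msg is None:
        return 'transport error (non-200 response)'
    if msg is False:
        return 'server replied, but status was not "ok"'
    return 'got message %s: %s' % (msg['id'], msg['body'])

print(handle(None))
print(handle(False))
print(handle({'id': 'a' * 32, 'body': 'hello', 'status': 'ok'}))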
8,849
klahnakoski/pyLibrary
mo_threads/signal.py
Signal.wait
def wait(self):
    """
    PUT THREAD IN WAIT STATE UNTIL SIGNAL IS ACTIVATED
    """
    if self._go:
        return True

    with self.lock:
        if self._go:
            return True
        stopper = _allocate_lock()
        stopper.acquire()
        if not self.waiting_threads:
            self.waiting_threads = [stopper]
        else:
            self.waiting_threads.append(stopper)

    DEBUG and self._name and Log.note("wait for go {{name|quote}}", name=self.name)
    stopper.acquire()
    DEBUG and self._name and Log.note("GOing! {{name|quote}}", name=self.name)
    return True
python
def wait(self):
    """
    PUT THREAD IN WAIT STATE UNTIL SIGNAL IS ACTIVATED
    """
    if self._go:
        return True

    with self.lock:
        if self._go:
            return True
        stopper = _allocate_lock()
        stopper.acquire()
        if not self.waiting_threads:
            self.waiting_threads = [stopper]
        else:
            self.waiting_threads.append(stopper)

    DEBUG and self._name and Log.note("wait for go {{name|quote}}", name=self.name)
    stopper.acquire()
    DEBUG and self._name and Log.note("GOing! {{name|quote}}", name=self.name)
    return True
['def', 'wait', '(', 'self', ')', ':', 'if', 'self', '.', '_go', ':', 'return', 'True', 'with', 'self', '.', 'lock', ':', 'if', 'self', '.', '_go', ':', 'return', 'True', 'stopper', '=', '_allocate_lock', '(', ')', 'stopper', '.', 'acquire', '(', ')', 'if', 'not', 'self', '.', 'waiting_threads', ':', 'self', '.', 'waiting_threads', '=', '[', 'stopper', ']', 'else', ':', 'self', '.', 'waiting_threads', '.', 'append', '(', 'stopper', ')', 'DEBUG', 'and', 'self', '.', '_name', 'and', 'Log', '.', 'note', '(', '"wait for go {{name|quote}}"', ',', 'name', '=', 'self', '.', 'name', ')', 'stopper', '.', 'acquire', '(', ')', 'DEBUG', 'and', 'self', '.', '_name', 'and', 'Log', '.', 'note', '(', '"GOing! {{name|quote}}"', ',', 'name', '=', 'self', '.', 'name', ')', 'return', 'True']
PUT THREAD IN WAIT STATE UNTIL SIGNAL IS ACTIVATED
['PUT', 'THREAD', 'IN', 'WAIT', 'STATE', 'UNTIL', 'SIGNAL', 'IS', 'ACTIVATED']
train
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_threads/signal.py#L57-L77
8,850
a1ezzz/wasp-general
wasp_general/crypto/rsa.py
WRSA.export_private_key
def export_private_key(self, password=None):
    """ Export a private key in PEM-format

    :param password: If it is not None, then the result will be encrypted with the given password
    :return: bytes
    """
    if self.__private_key is None:
        raise ValueError('Unable to call this method. Private key must be set')

    if password is not None:
        if isinstance(password, str) is True:
            password = password.encode()
        return self.__private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.BestAvailableEncryption(password)
        )

    return self.__private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption()
    )
python
def export_private_key(self, password=None):
    """ Export a private key in PEM-format

    :param password: If it is not None, then the result will be encrypted with the given password
    :return: bytes
    """
    if self.__private_key is None:
        raise ValueError('Unable to call this method. Private key must be set')

    if password is not None:
        if isinstance(password, str) is True:
            password = password.encode()
        return self.__private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.BestAvailableEncryption(password)
        )

    return self.__private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption()
    )
['def', 'export_private_key', '(', 'self', ',', 'password', '=', 'None', ')', ':', 'if', 'self', '.', '__private_key', 'is', 'None', ':', 'raise', 'ValueError', '(', "'Unable to call this method. Private key must be set'", ')', 'if', 'password', 'is', 'not', 'None', ':', 'if', 'isinstance', '(', 'password', ',', 'str', ')', 'is', 'True', ':', 'password', '=', 'password', '.', 'encode', '(', ')', 'return', 'self', '.', '__private_key', '.', 'private_bytes', '(', 'encoding', '=', 'serialization', '.', 'Encoding', '.', 'PEM', ',', 'format', '=', 'serialization', '.', 'PrivateFormat', '.', 'PKCS8', ',', 'encryption_algorithm', '=', 'serialization', '.', 'BestAvailableEncryption', '(', 'password', ')', ')', 'return', 'self', '.', '__private_key', '.', 'private_bytes', '(', 'encoding', '=', 'serialization', '.', 'Encoding', '.', 'PEM', ',', 'format', '=', 'serialization', '.', 'PrivateFormat', '.', 'TraditionalOpenSSL', ',', 'encryption_algorithm', '=', 'serialization', '.', 'NoEncryption', '(', ')', ')']
Export a private key in PEM-format :param password: If it is not None, then the result will be encrypted with the given password :return: bytes
['Export', 'a', 'private', 'key', 'in', 'PEM', '-', 'format']
train
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/crypto/rsa.py#L106-L128
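The two branches map directly onto the cryptography package's serialization API. A standalone sketch of the same calls; the key size and the password are arbitrary choices, and depending on the installed cryptography version generate_private_key may also require a backend argument.

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

# Password given: encrypted PKCS#8 PEM, as in the first branch above.
encrypted_pem = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.BestAvailableEncryption(b'secret'),
)

# No password: unencrypted TraditionalOpenSSL PEM, as in the second branch.
plain_pem = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.TraditionalOpenSSL,
    encryption_algorithm=serialization.NoEncryption(),
)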
8,851
ninuxorg/nodeshot
nodeshot/community/participation/models/__init__.py
create_node_rating_counts_settings
def create_node_rating_counts_settings(sender, **kwargs):
    """ create node rating count and settings"""
    created = kwargs['created']
    node = kwargs['instance']
    if created:
        # create node_rating_count and settings
        # task will be executed in background unless settings.CELERY_ALWAYS_EAGER is True
        # if CELERY_ALWAYS_EAGER is False celery worker must be running otherwise task won't be executed
        create_related_object.delay(NodeRatingCount, {'node': node})
        create_related_object.delay(NodeParticipationSettings, {'node': node})
python
def create_node_rating_counts_settings(sender, **kwargs):
    """ create node rating count and settings"""
    created = kwargs['created']
    node = kwargs['instance']
    if created:
        # create node_rating_count and settings
        # task will be executed in background unless settings.CELERY_ALWAYS_EAGER is True
        # if CELERY_ALWAYS_EAGER is False celery worker must be running otherwise task won't be executed
        create_related_object.delay(NodeRatingCount, {'node': node})
        create_related_object.delay(NodeParticipationSettings, {'node': node})
['def', 'create_node_rating_counts_settings', '(', 'sender', ',', '*', '*', 'kwargs', ')', ':', 'created', '=', 'kwargs', '[', "'created'", ']', 'node', '=', 'kwargs', '[', "'instance'", ']', 'if', 'created', ':', '# create node_rating_count and settings', '# task will be executed in background unless settings.CELERY_ALWAYS_EAGER is True', "# if CELERY_ALWAYS_EAGER is False celery worker must be running otherwise task won't be executed", 'create_related_object', '.', 'delay', '(', 'NodeRatingCount', ',', '{', "'node'", ':', 'node', '}', ')', 'create_related_object', '.', 'delay', '(', 'NodeParticipationSettings', ',', '{', "'node'", ':', 'node', '}', ')']
create node rating count and settings
['create', 'node', 'rating', 'count', 'and', 'settings']
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/community/participation/models/__init__.py#L147-L156
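The (sender, **kwargs) signature with 'created' and 'instance' is the keyword contract of Django's post_save signal. A plain-Python illustration of that contract, with no database or Celery involved; FakeNode and the handler are stand-ins, not nodeshot code.

class FakeNode:
    pass

def on_post_save(sender, **kwargs):
    # Django's post_save sends sender, instance, created, raw, using (and update_fields).
    if kwargs['created']:
        print('would enqueue NodeRatingCount / NodeParticipationSettings for', kwargs['instance'])

on_post_save(FakeNode, instance=FakeNode(), created=True, raw=False, using='default')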
8,852
ArduPilot/MAVProxy
MAVProxy/modules/lib/MacOS/backend_wx.py
MenuButtonWx._handleSelectAllAxes
def _handleSelectAllAxes(self, evt):
    """Called when the 'select all axes' menu item is selected."""
    if len(self._axisId) == 0:
        return
    for i in range(len(self._axisId)):
        self._menu.Check(self._axisId[i], True)
    self._toolbar.set_active(self.getActiveAxes())
    evt.Skip()
python
def _handleSelectAllAxes(self, evt):
    """Called when the 'select all axes' menu item is selected."""
    if len(self._axisId) == 0:
        return
    for i in range(len(self._axisId)):
        self._menu.Check(self._axisId[i], True)
    self._toolbar.set_active(self.getActiveAxes())
    evt.Skip()
['def', '_handleSelectAllAxes', '(', 'self', ',', 'evt', ')', ':', 'if', 'len', '(', 'self', '.', '_axisId', ')', '==', '0', ':', 'return', 'for', 'i', 'in', 'range', '(', 'len', '(', 'self', '.', '_axisId', ')', ')', ':', 'self', '.', '_menu', '.', 'Check', '(', 'self', '.', '_axisId', '[', 'i', ']', ',', 'True', ')', 'self', '.', '_toolbar', '.', 'set_active', '(', 'self', '.', 'getActiveAxes', '(', ')', ')', 'evt', '.', 'Skip', '(', ')']
Called when the 'select all axes' menu item is selected.
['Called', 'when', 'the', 'select', 'all', 'axes', 'menu', 'item', 'is', 'selected', '.']
train
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/lib/MacOS/backend_wx.py#L1521-L1528
8,853
log2timeline/plaso
plaso/engine/worker.py
EventExtractionWorker._ProcessArchiveTypes
def _ProcessArchiveTypes(self, mediator, path_spec, type_indicators): """Processes a data stream containing archive types such as: TAR or ZIP. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. path_spec (dfvfs.PathSpec): path specification. type_indicators(list[str]): dfVFS archive type indicators found in the data stream. """ number_of_type_indicators = len(type_indicators) if number_of_type_indicators == 0: return self.processing_status = definitions.STATUS_INDICATOR_COLLECTING if number_of_type_indicators > 1: display_name = mediator.GetDisplayName() logger.debug(( 'Found multiple format type indicators: {0:s} for ' 'archive file: {1:s}').format(type_indicators, display_name)) for type_indicator in type_indicators: if type_indicator == dfvfs_definitions.TYPE_INDICATOR_TAR: archive_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_TAR, location='/', parent=path_spec) elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_ZIP: archive_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_ZIP, location='/', parent=path_spec) else: archive_path_spec = None warning_message = ( 'unsupported archive format type indicator: {0:s}').format( type_indicator) mediator.ProduceExtractionWarning( warning_message, path_spec=path_spec) if archive_path_spec: try: path_spec_generator = self._path_spec_extractor.ExtractPathSpecs( [archive_path_spec], resolver_context=mediator.resolver_context) for generated_path_spec in path_spec_generator: if self._abort: break event_source = event_sources.FileEntryEventSource( path_spec=generated_path_spec) event_source.file_entry_type = ( dfvfs_definitions.FILE_ENTRY_TYPE_FILE) mediator.ProduceEventSource(event_source) self.last_activity_timestamp = time.time() except (IOError, errors.MaximumRecursionDepth) as exception: warning_message = ( 'unable to process archive file with error: {0!s}').format( exception) mediator.ProduceExtractionWarning( warning_message, path_spec=generated_path_spec)
python
def _ProcessArchiveTypes(self, mediator, path_spec, type_indicators): """Processes a data stream containing archive types such as: TAR or ZIP. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. path_spec (dfvfs.PathSpec): path specification. type_indicators(list[str]): dfVFS archive type indicators found in the data stream. """ number_of_type_indicators = len(type_indicators) if number_of_type_indicators == 0: return self.processing_status = definitions.STATUS_INDICATOR_COLLECTING if number_of_type_indicators > 1: display_name = mediator.GetDisplayName() logger.debug(( 'Found multiple format type indicators: {0:s} for ' 'archive file: {1:s}').format(type_indicators, display_name)) for type_indicator in type_indicators: if type_indicator == dfvfs_definitions.TYPE_INDICATOR_TAR: archive_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_TAR, location='/', parent=path_spec) elif type_indicator == dfvfs_definitions.TYPE_INDICATOR_ZIP: archive_path_spec = path_spec_factory.Factory.NewPathSpec( dfvfs_definitions.TYPE_INDICATOR_ZIP, location='/', parent=path_spec) else: archive_path_spec = None warning_message = ( 'unsupported archive format type indicator: {0:s}').format( type_indicator) mediator.ProduceExtractionWarning( warning_message, path_spec=path_spec) if archive_path_spec: try: path_spec_generator = self._path_spec_extractor.ExtractPathSpecs( [archive_path_spec], resolver_context=mediator.resolver_context) for generated_path_spec in path_spec_generator: if self._abort: break event_source = event_sources.FileEntryEventSource( path_spec=generated_path_spec) event_source.file_entry_type = ( dfvfs_definitions.FILE_ENTRY_TYPE_FILE) mediator.ProduceEventSource(event_source) self.last_activity_timestamp = time.time() except (IOError, errors.MaximumRecursionDepth) as exception: warning_message = ( 'unable to process archive file with error: {0!s}').format( exception) mediator.ProduceExtractionWarning( warning_message, path_spec=generated_path_spec)
['def', '_ProcessArchiveTypes', '(', 'self', ',', 'mediator', ',', 'path_spec', ',', 'type_indicators', ')', ':', 'number_of_type_indicators', '=', 'len', '(', 'type_indicators', ')', 'if', 'number_of_type_indicators', '==', '0', ':', 'return', 'self', '.', 'processing_status', '=', 'definitions', '.', 'STATUS_INDICATOR_COLLECTING', 'if', 'number_of_type_indicators', '>', '1', ':', 'display_name', '=', 'mediator', '.', 'GetDisplayName', '(', ')', 'logger', '.', 'debug', '(', '(', "'Found multiple format type indicators: {0:s} for '", "'archive file: {1:s}'", ')', '.', 'format', '(', 'type_indicators', ',', 'display_name', ')', ')', 'for', 'type_indicator', 'in', 'type_indicators', ':', 'if', 'type_indicator', '==', 'dfvfs_definitions', '.', 'TYPE_INDICATOR_TAR', ':', 'archive_path_spec', '=', 'path_spec_factory', '.', 'Factory', '.', 'NewPathSpec', '(', 'dfvfs_definitions', '.', 'TYPE_INDICATOR_TAR', ',', 'location', '=', "'/'", ',', 'parent', '=', 'path_spec', ')', 'elif', 'type_indicator', '==', 'dfvfs_definitions', '.', 'TYPE_INDICATOR_ZIP', ':', 'archive_path_spec', '=', 'path_spec_factory', '.', 'Factory', '.', 'NewPathSpec', '(', 'dfvfs_definitions', '.', 'TYPE_INDICATOR_ZIP', ',', 'location', '=', "'/'", ',', 'parent', '=', 'path_spec', ')', 'else', ':', 'archive_path_spec', '=', 'None', 'warning_message', '=', '(', "'unsupported archive format type indicator: {0:s}'", ')', '.', 'format', '(', 'type_indicator', ')', 'mediator', '.', 'ProduceExtractionWarning', '(', 'warning_message', ',', 'path_spec', '=', 'path_spec', ')', 'if', 'archive_path_spec', ':', 'try', ':', 'path_spec_generator', '=', 'self', '.', '_path_spec_extractor', '.', 'ExtractPathSpecs', '(', '[', 'archive_path_spec', ']', ',', 'resolver_context', '=', 'mediator', '.', 'resolver_context', ')', 'for', 'generated_path_spec', 'in', 'path_spec_generator', ':', 'if', 'self', '.', '_abort', ':', 'break', 'event_source', '=', 'event_sources', '.', 'FileEntryEventSource', '(', 'path_spec', '=', 'generated_path_spec', ')', 'event_source', '.', 'file_entry_type', '=', '(', 'dfvfs_definitions', '.', 'FILE_ENTRY_TYPE_FILE', ')', 'mediator', '.', 'ProduceEventSource', '(', 'event_source', ')', 'self', '.', 'last_activity_timestamp', '=', 'time', '.', 'time', '(', ')', 'except', '(', 'IOError', ',', 'errors', '.', 'MaximumRecursionDepth', ')', 'as', 'exception', ':', 'warning_message', '=', '(', "'unable to process archive file with error: {0!s}'", ')', '.', 'format', '(', 'exception', ')', 'mediator', '.', 'ProduceExtractionWarning', '(', 'warning_message', ',', 'path_spec', '=', 'generated_path_spec', ')']
Processes a data stream containing archive types such as: TAR or ZIP. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. path_spec (dfvfs.PathSpec): path specification. type_indicators(list[str]): dfVFS archive type indicators found in the data stream.
['Processes', 'a', 'data', 'stream', 'containing', 'archive', 'types', 'such', 'as', ':', 'TAR', 'or', 'ZIP', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/worker.py#L440-L504
8,854
yeraydiazdiaz/lunr.py
lunr/languages/stemmer.py
nltk_stemmer
def nltk_stemmer(stemmer, token, i=None, tokens=None):
    """Wrapper around a NLTK SnowballStemmer, which includes stop words for
    each language.

    Args:
        stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
        token (lunr.Token): The token to stem.
        i (int): The index of the token in a set.
        tokens (list): A list of tokens representing the set.
    """

    def wrapped_stem(token, metadata=None):
        return stemmer.stem(token)

    return token.update(wrapped_stem)
python
def nltk_stemmer(stemmer, token, i=None, tokens=None):
    """Wrapper around a NLTK SnowballStemmer, which includes stop words for
    each language.

    Args:
        stemmer (SnowballStemmer): Stemmer instance that performs the stemming.
        token (lunr.Token): The token to stem.
        i (int): The index of the token in a set.
        tokens (list): A list of tokens representing the set.
    """

    def wrapped_stem(token, metadata=None):
        return stemmer.stem(token)

    return token.update(wrapped_stem)
['def', 'nltk_stemmer', '(', 'stemmer', ',', 'token', ',', 'i', '=', 'None', ',', 'tokens', '=', 'None', ')', ':', 'def', 'wrapped_stem', '(', 'token', ',', 'metadata', '=', 'None', ')', ':', 'return', 'stemmer', '.', 'stem', '(', 'token', ')', 'return', 'token', '.', 'update', '(', 'wrapped_stem', ')']
Wrapper around a NLTK SnowballStemmer, which includes stop words for each language. Args: stemmer (SnowballStemmer): Stemmer instance that performs the stemming. token (lunr.Token): The token to stem. i (int): The index of the token in a set. tokens (list): A list of tokens representing the set.
['Wrapper', 'around', 'a', 'NLTK', 'SnowballStemmer', 'which', 'includes', 'stop', 'words', 'for', 'each', 'language', '.']
train
https://github.com/yeraydiazdiaz/lunr.py/blob/28ec3f6d4888295eed730211ee9617aa488d6ba3/lunr/languages/stemmer.py#L13-L27
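Because the wrapper takes the stemmer as its first argument, it lends itself to binding with functools.partial before being handed to a token pipeline. A small sketch; the SnowballStemmer language is an arbitrary choice, it assumes nltk is installed and that nltk_stemmer from the record above is importable, and it does not show how lunr registers pipeline functions.

from functools import partial
from nltk.stem.snowball import SnowballStemmer

# Bind the stemmer so the result has the (token, i, tokens) shape a pipeline expects.
stemmer_fn = partial(nltk_stemmer, SnowballStemmer('english'))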
8,855
ceph/ceph-deploy
ceph_deploy/install.py
install_repo
def install_repo(args): """ For a user that only wants to install the repository only (and avoid installing Ceph and its dependencies). """ cd_conf = getattr(args, 'cd_conf', None) for hostname in args.host: LOG.debug('Detecting platform for host %s ...', hostname) distro = hosts.get( hostname, username=args.username, # XXX this should get removed once Ceph packages are split for # upstream. If default_release is True, it means that the user is # trying to install on a RHEL machine and should expect to get RHEL # packages. Otherwise, it will need to specify either a specific # version, or repo, or a development branch. Other distro users should # not see any differences. use_rhceph=args.default_release, ) rlogger = logging.getLogger(hostname) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) custom_repo(distro, args, cd_conf, rlogger, install_ceph=False)
python
def install_repo(args): """ For a user that only wants to install the repository only (and avoid installing Ceph and its dependencies). """ cd_conf = getattr(args, 'cd_conf', None) for hostname in args.host: LOG.debug('Detecting platform for host %s ...', hostname) distro = hosts.get( hostname, username=args.username, # XXX this should get removed once Ceph packages are split for # upstream. If default_release is True, it means that the user is # trying to install on a RHEL machine and should expect to get RHEL # packages. Otherwise, it will need to specify either a specific # version, or repo, or a development branch. Other distro users should # not see any differences. use_rhceph=args.default_release, ) rlogger = logging.getLogger(hostname) LOG.info( 'Distro info: %s %s %s', distro.name, distro.release, distro.codename ) custom_repo(distro, args, cd_conf, rlogger, install_ceph=False)
['def', 'install_repo', '(', 'args', ')', ':', 'cd_conf', '=', 'getattr', '(', 'args', ',', "'cd_conf'", ',', 'None', ')', 'for', 'hostname', 'in', 'args', '.', 'host', ':', 'LOG', '.', 'debug', '(', "'Detecting platform for host %s ...'", ',', 'hostname', ')', 'distro', '=', 'hosts', '.', 'get', '(', 'hostname', ',', 'username', '=', 'args', '.', 'username', ',', '# XXX this should get removed once Ceph packages are split for', '# upstream. If default_release is True, it means that the user is', '# trying to install on a RHEL machine and should expect to get RHEL', '# packages. Otherwise, it will need to specify either a specific', '# version, or repo, or a development branch. Other distro users should', '# not see any differences.', 'use_rhceph', '=', 'args', '.', 'default_release', ',', ')', 'rlogger', '=', 'logging', '.', 'getLogger', '(', 'hostname', ')', 'LOG', '.', 'info', '(', "'Distro info: %s %s %s'", ',', 'distro', '.', 'name', ',', 'distro', '.', 'release', ',', 'distro', '.', 'codename', ')', 'custom_repo', '(', 'distro', ',', 'args', ',', 'cd_conf', ',', 'rlogger', ',', 'install_ceph', '=', 'False', ')']
For a user that only wants to install the repository only (and avoid installing Ceph and its dependencies).
['For', 'a', 'user', 'that', 'only', 'wants', 'to', 'install', 'the', 'repository', 'only', '(', 'and', 'avoid', 'installing', 'Ceph', 'and', 'its', 'dependencies', ')', '.']
train
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/install.py#L287-L316
8,856
driftx/Telephus
telephus/cassandra/Cassandra.py
Client.system_add_column_family
def system_add_column_family(self, cf_def):
    """
    adds a column family. returns the new schema id.

    Parameters:
     - cf_def
    """
    self._seqid += 1
    d = self._reqs[self._seqid] = defer.Deferred()
    self.send_system_add_column_family(cf_def)
    return d
python
def system_add_column_family(self, cf_def):
    """
    adds a column family. returns the new schema id.

    Parameters:
     - cf_def
    """
    self._seqid += 1
    d = self._reqs[self._seqid] = defer.Deferred()
    self.send_system_add_column_family(cf_def)
    return d
['def', 'system_add_column_family', '(', 'self', ',', 'cf_def', ')', ':', 'self', '.', '_seqid', '+=', '1', 'd', '=', 'self', '.', '_reqs', '[', 'self', '.', '_seqid', ']', '=', 'defer', '.', 'Deferred', '(', ')', 'self', '.', 'send_system_add_column_family', '(', 'cf_def', ')', 'return', 'd']
adds a column family. returns the new schema id. Parameters: - cf_def
['adds', 'a', 'column', 'family', '.', 'returns', 'the', 'new', 'schema', 'id', '.']
train
https://github.com/driftx/Telephus/blob/860a03a0fafe71605e1a4316dfdd8d0c29094703/telephus/cassandra/Cassandra.py#L1656-L1666
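The method registers and returns a Twisted Deferred that fires when the matching reply arrives. A self-contained sketch of wiring callbacks onto such a Deferred; in real use `d` would come from client.system_add_column_family(cf_def), but a bare Deferred stands in here so the example runs on its own, and the fired value is a made-up placeholder.

from twisted.internet import defer

def on_schema_change(schema_id):
    print('new schema id:', schema_id)

def on_failure(failure):
    print('schema change failed:', failure)

d = defer.Deferred()
d.addCallback(on_schema_change)
d.addErrback(on_failure)
d.callback('example-schema-id')   # simulate the reply arriving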
8,857
TangentMicroServices/PythonClient
microclient/clients.py
ServiceBase.delete
def delete(self, resource, resource_id):
    '''
    A base function that performs a default delete DELETE request for a given object
    '''
    service_def, resource_def, path = self._get_service_information(
        resource)

    delete_path = "{0}{1}/" . format(path, resource_id)
    return self.call(path=delete_path, method="delete")
python
def delete(self, resource, resource_id):
    '''
    A base function that performs a default delete DELETE request for a given object
    '''
    service_def, resource_def, path = self._get_service_information(
        resource)

    delete_path = "{0}{1}/" . format(path, resource_id)
    return self.call(path=delete_path, method="delete")
['def', 'delete', '(', 'self', ',', 'resource', ',', 'resource_id', ')', ':', 'service_def', ',', 'resource_def', ',', 'path', '=', 'self', '.', '_get_service_information', '(', 'resource', ')', 'delete_path', '=', '"{0}{1}/"', '.', 'format', '(', 'path', ',', 'resource_id', ')', 'return', 'self', '.', 'call', '(', 'path', '=', 'delete_path', ',', 'method', '=', '"delete"', ')']
A base function that performs a default delete DELETE request for a given object
['A', 'base', 'function', 'that', 'performs', 'a', 'default', 'delete', 'DELETE', 'request', 'for', 'a', 'given', 'object']
train
https://github.com/TangentMicroServices/PythonClient/blob/98cc4b3027fa1b2e8a66146a7efe76370fc225f0/microclient/clients.py#L169-L177
8,858
chaoss/grimoirelab-elk
grimoire_elk/enriched/projects.py
GrimoireLibProjects.get_projects
def get_projects(self): """ Get the projects list from database """ repos_list = [] gerrit_projects_db = self.projects_db db = Database(user="root", passwd="", host="localhost", port=3306, scrdb=None, shdb=gerrit_projects_db, prjdb=None) sql = """ SELECT DISTINCT(repository_name) FROM project_repositories WHERE data_source='scr' """ repos_list_raw = db.execute(sql) # Convert from review.openstack.org_openstack/rpm-packaging-tools to # openstack_rpm-packaging-tools for repo in repos_list_raw: # repo_name = repo[0].replace("review.openstack.org_","") repo_name = repo[0].replace(self.repository + "_", "") repos_list.append(repo_name) return repos_list
python
def get_projects(self): """ Get the projects list from database """ repos_list = [] gerrit_projects_db = self.projects_db db = Database(user="root", passwd="", host="localhost", port=3306, scrdb=None, shdb=gerrit_projects_db, prjdb=None) sql = """ SELECT DISTINCT(repository_name) FROM project_repositories WHERE data_source='scr' """ repos_list_raw = db.execute(sql) # Convert from review.openstack.org_openstack/rpm-packaging-tools to # openstack_rpm-packaging-tools for repo in repos_list_raw: # repo_name = repo[0].replace("review.openstack.org_","") repo_name = repo[0].replace(self.repository + "_", "") repos_list.append(repo_name) return repos_list
['def', 'get_projects', '(', 'self', ')', ':', 'repos_list', '=', '[', ']', 'gerrit_projects_db', '=', 'self', '.', 'projects_db', 'db', '=', 'Database', '(', 'user', '=', '"root"', ',', 'passwd', '=', '""', ',', 'host', '=', '"localhost"', ',', 'port', '=', '3306', ',', 'scrdb', '=', 'None', ',', 'shdb', '=', 'gerrit_projects_db', ',', 'prjdb', '=', 'None', ')', 'sql', '=', '"""\n SELECT DISTINCT(repository_name)\n FROM project_repositories\n WHERE data_source=\'scr\'\n """', 'repos_list_raw', '=', 'db', '.', 'execute', '(', 'sql', ')', '# Convert from review.openstack.org_openstack/rpm-packaging-tools to', '# openstack_rpm-packaging-tools', 'for', 'repo', 'in', 'repos_list_raw', ':', '# repo_name = repo[0].replace("review.openstack.org_","")', 'repo_name', '=', 'repo', '[', '0', ']', '.', 'replace', '(', 'self', '.', 'repository', '+', '"_"', ',', '""', ')', 'repos_list', '.', 'append', '(', 'repo_name', ')', 'return', 'repos_list']
Get the projects list from database
['Get', 'the', 'projects', 'list', 'from', 'database']
train
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/projects.py#L37-L62
8,859
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
CustomerLimit.is_all_field_none
def is_all_field_none(self): """ :rtype: bool """ if self._limit_monetary_account is not None: return False if self._limit_monetary_account_remaining is not None: return False if self._limit_card_debit_maestro is not None: return False if self._limit_card_debit_mastercard is not None: return False if self._limit_card_debit_wildcard is not None: return False if self._limit_card_debit_replacement is not None: return False if self._limit_invite_user_premium_limited is not None: return False if self._limit_amount_monthly is not None: return False if self._spent_amount_monthly is not None: return False return True
python
def is_all_field_none(self): """ :rtype: bool """ if self._limit_monetary_account is not None: return False if self._limit_monetary_account_remaining is not None: return False if self._limit_card_debit_maestro is not None: return False if self._limit_card_debit_mastercard is not None: return False if self._limit_card_debit_wildcard is not None: return False if self._limit_card_debit_replacement is not None: return False if self._limit_invite_user_premium_limited is not None: return False if self._limit_amount_monthly is not None: return False if self._spent_amount_monthly is not None: return False return True
['def', 'is_all_field_none', '(', 'self', ')', ':', 'if', 'self', '.', '_limit_monetary_account', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_limit_monetary_account_remaining', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_limit_card_debit_maestro', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_limit_card_debit_mastercard', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_limit_card_debit_wildcard', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_limit_card_debit_replacement', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_limit_invite_user_premium_limited', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_limit_amount_monthly', 'is', 'not', 'None', ':', 'return', 'False', 'if', 'self', '.', '_spent_amount_monthly', 'is', 'not', 'None', ':', 'return', 'False', 'return', 'True']
:rtype: bool
[':', 'rtype', ':', 'bool']
train
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L373-L405
8,860
ergo/ziggurat_foundations
ziggurat_foundations/models/services/resource.py
ResourceService.direct_perms_for_user
def direct_perms_for_user(cls, instance, user, db_session=None): """ returns permissions that given user has for this resource without ones inherited from groups that user belongs to :param instance: :param user: :param db_session: :return: """ db_session = get_db_session(db_session, instance) query = db_session.query( cls.models_proxy.UserResourcePermission.user_id, cls.models_proxy.UserResourcePermission.perm_name, ) query = query.filter(cls.models_proxy.UserResourcePermission.user_id == user.id) query = query.filter( cls.models_proxy.UserResourcePermission.resource_id == instance.resource_id ) perms = [ PermissionTuple(user, row.perm_name, "user", None, instance, False, True) for row in query ] # include all perms if user is the owner of this resource if instance.owner_user_id == user.id: perms.append( PermissionTuple(user, ALL_PERMISSIONS, "user", None, instance, True) ) return perms
python
def direct_perms_for_user(cls, instance, user, db_session=None): """ returns permissions that given user has for this resource without ones inherited from groups that user belongs to :param instance: :param user: :param db_session: :return: """ db_session = get_db_session(db_session, instance) query = db_session.query( cls.models_proxy.UserResourcePermission.user_id, cls.models_proxy.UserResourcePermission.perm_name, ) query = query.filter(cls.models_proxy.UserResourcePermission.user_id == user.id) query = query.filter( cls.models_proxy.UserResourcePermission.resource_id == instance.resource_id ) perms = [ PermissionTuple(user, row.perm_name, "user", None, instance, False, True) for row in query ] # include all perms if user is the owner of this resource if instance.owner_user_id == user.id: perms.append( PermissionTuple(user, ALL_PERMISSIONS, "user", None, instance, True) ) return perms
['def', 'direct_perms_for_user', '(', 'cls', ',', 'instance', ',', 'user', ',', 'db_session', '=', 'None', ')', ':', 'db_session', '=', 'get_db_session', '(', 'db_session', ',', 'instance', ')', 'query', '=', 'db_session', '.', 'query', '(', 'cls', '.', 'models_proxy', '.', 'UserResourcePermission', '.', 'user_id', ',', 'cls', '.', 'models_proxy', '.', 'UserResourcePermission', '.', 'perm_name', ',', ')', 'query', '=', 'query', '.', 'filter', '(', 'cls', '.', 'models_proxy', '.', 'UserResourcePermission', '.', 'user_id', '==', 'user', '.', 'id', ')', 'query', '=', 'query', '.', 'filter', '(', 'cls', '.', 'models_proxy', '.', 'UserResourcePermission', '.', 'resource_id', '==', 'instance', '.', 'resource_id', ')', 'perms', '=', '[', 'PermissionTuple', '(', 'user', ',', 'row', '.', 'perm_name', ',', '"user"', ',', 'None', ',', 'instance', ',', 'False', ',', 'True', ')', 'for', 'row', 'in', 'query', ']', '# include all perms if user is the owner of this resource', 'if', 'instance', '.', 'owner_user_id', '==', 'user', '.', 'id', ':', 'perms', '.', 'append', '(', 'PermissionTuple', '(', 'user', ',', 'ALL_PERMISSIONS', ',', '"user"', ',', 'None', ',', 'instance', ',', 'True', ')', ')', 'return', 'perms']
returns permissions that given user has for this resource without ones inherited from groups that user belongs to :param instance: :param user: :param db_session: :return:
['returns', 'permissions', 'that', 'given', 'user', 'has', 'for', 'this', 'resource', 'without', 'ones', 'inherited', 'from', 'groups', 'that', 'user', 'belongs', 'to']
train
https://github.com/ergo/ziggurat_foundations/blob/9eeec894d08e8d7defa60ddc04b63f69cd4cbeba/ziggurat_foundations/models/services/resource.py#L109-L139
8,861
aparo/pyes
pyes/models.py
ElasticSearchModel.save
def save(self, bulk=False, id=None, parent=None, routing=None, force=False): """ Save the object and returns id """ meta = self._meta conn = meta['connection'] id = id or meta.get("id", None) parent = parent or meta.get('parent', None) routing = routing or meta.get('routing', None) qargs = None if routing: qargs={'routing': routing} version = meta.get('version', None) if force: version = None res = conn.index(self, meta.index, meta.type, id, parent=parent, bulk=bulk, version=version, force_insert=force, querystring_args=qargs) if not bulk: self._meta.id = res._id self._meta.version = res._version return res._id return id
python
def save(self, bulk=False, id=None, parent=None, routing=None, force=False): """ Save the object and returns id """ meta = self._meta conn = meta['connection'] id = id or meta.get("id", None) parent = parent or meta.get('parent', None) routing = routing or meta.get('routing', None) qargs = None if routing: qargs={'routing': routing} version = meta.get('version', None) if force: version = None res = conn.index(self, meta.index, meta.type, id, parent=parent, bulk=bulk, version=version, force_insert=force, querystring_args=qargs) if not bulk: self._meta.id = res._id self._meta.version = res._version return res._id return id
['def', 'save', '(', 'self', ',', 'bulk', '=', 'False', ',', 'id', '=', 'None', ',', 'parent', '=', 'None', ',', 'routing', '=', 'None', ',', 'force', '=', 'False', ')', ':', 'meta', '=', 'self', '.', '_meta', 'conn', '=', 'meta', '[', "'connection'", ']', 'id', '=', 'id', 'or', 'meta', '.', 'get', '(', '"id"', ',', 'None', ')', 'parent', '=', 'parent', 'or', 'meta', '.', 'get', '(', "'parent'", ',', 'None', ')', 'routing', '=', 'routing', 'or', 'meta', '.', 'get', '(', "'routing'", ',', 'None', ')', 'qargs', '=', 'None', 'if', 'routing', ':', 'qargs', '=', '{', "'routing'", ':', 'routing', '}', 'version', '=', 'meta', '.', 'get', '(', "'version'", ',', 'None', ')', 'if', 'force', ':', 'version', '=', 'None', 'res', '=', 'conn', '.', 'index', '(', 'self', ',', 'meta', '.', 'index', ',', 'meta', '.', 'type', ',', 'id', ',', 'parent', '=', 'parent', ',', 'bulk', '=', 'bulk', ',', 'version', '=', 'version', ',', 'force_insert', '=', 'force', ',', 'querystring_args', '=', 'qargs', ')', 'if', 'not', 'bulk', ':', 'self', '.', '_meta', '.', 'id', '=', 'res', '.', '_id', 'self', '.', '_meta', '.', 'version', '=', 'res', '.', '_version', 'return', 'res', '.', '_id', 'return', 'id']
Save the object and returns id
['Save', 'the', 'object', 'and', 'returns', 'id']
train
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/models.py#L66-L89
8,862
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence.parse_token
def parse_token(self, token, tags=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Returns the arguments for Sentence.append() from a tagged token representation. The order in which token tags appear can be specified. The default order is (separated by slashes): - word, - part-of-speech, - (IOB-)chunk, - (IOB-)preposition, - chunk(-relation)(-role), - anchor, - lemma. Examples: The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat clawed/VBD/B-VP/O/VP-1/A1/claw at/IN/B-PP/B-PNP/PP/P1/at the/DT/B-NP/I-PNP/NP/P1/the sofa/NN/I-NP/I-PNP/NP/P1/sofa ././O/O/O/O/. Returns a (word, lemma, type, chunk, role, relation, preposition, anchor, iob, custom)-tuple, which can be passed to Sentence.append(): Sentence.append(*Sentence.parse_token("cats/NNS/NP")) The custom value is a dictionary of (tag, value)-items of unrecognized tags in the token. """ p = { WORD: "", POS: None, IOB: None, CHUNK: None, PNP: None, REL: None, ROLE: None, ANCHOR: None, LEMMA: None } # Split the slash-formatted token into separate tags in the given order. # Decode &slash; characters (usually in words and lemmata). # Assume None for missing tags (except the word itself, which defaults to an empty string). custom = {} for k, v in izip(tags, token.split("/")): if SLASH0 in v: v = v.replace(SLASH, "/") if k not in p: custom[k] = None if v != OUTSIDE or k == WORD or k == LEMMA: # "type O negative" => "O" != OUTSIDE. (p if k not in custom else custom)[k] = v # Split IOB-prefix from the chunk tag: # B- marks the start of a new chunk, # I- marks inside of a chunk. ch = p[CHUNK] if ch is not None and ch.startswith(("B-", "I-")): p[IOB], p[CHUNK] = ch[:1], ch[2:] # B-NP # Split the role from the relation: # NP-SBJ-1 => relation id is 1 and role is SBJ, # VP-1 => relation id is 1 with no role. # Tokens may be tagged with multiple relations (e.g., NP-OBJ-1*NP-OBJ-3). if p[REL] is not None: ch, p[REL], p[ROLE] = self._parse_relation(p[REL]) # Infer a missing chunk tag from the relation tag (e.g., NP-SBJ-1 => NP). # For PP relation tags (e.g., PP-CLR-1), the first chunk is PP, the following chunks NP. if ch == "PP" \ and self._previous \ and self._previous[REL] == p[REL] \ and self._previous[ROLE] == p[ROLE]: ch = "NP" if p[CHUNK] is None and ch != OUTSIDE: p[CHUNK] = ch self._previous = p # Return the tags in the right order for Sentence.append(). return p[WORD], p[LEMMA], p[POS], p[CHUNK], p[ROLE], p[REL], p[PNP], p[ANCHOR], p[IOB], custom
python
def parse_token(self, token, tags=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Returns the arguments for Sentence.append() from a tagged token representation. The order in which token tags appear can be specified. The default order is (separated by slashes): - word, - part-of-speech, - (IOB-)chunk, - (IOB-)preposition, - chunk(-relation)(-role), - anchor, - lemma. Examples: The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat clawed/VBD/B-VP/O/VP-1/A1/claw at/IN/B-PP/B-PNP/PP/P1/at the/DT/B-NP/I-PNP/NP/P1/the sofa/NN/I-NP/I-PNP/NP/P1/sofa ././O/O/O/O/. Returns a (word, lemma, type, chunk, role, relation, preposition, anchor, iob, custom)-tuple, which can be passed to Sentence.append(): Sentence.append(*Sentence.parse_token("cats/NNS/NP")) The custom value is a dictionary of (tag, value)-items of unrecognized tags in the token. """ p = { WORD: "", POS: None, IOB: None, CHUNK: None, PNP: None, REL: None, ROLE: None, ANCHOR: None, LEMMA: None } # Split the slash-formatted token into separate tags in the given order. # Decode &slash; characters (usually in words and lemmata). # Assume None for missing tags (except the word itself, which defaults to an empty string). custom = {} for k, v in izip(tags, token.split("/")): if SLASH0 in v: v = v.replace(SLASH, "/") if k not in p: custom[k] = None if v != OUTSIDE or k == WORD or k == LEMMA: # "type O negative" => "O" != OUTSIDE. (p if k not in custom else custom)[k] = v # Split IOB-prefix from the chunk tag: # B- marks the start of a new chunk, # I- marks inside of a chunk. ch = p[CHUNK] if ch is not None and ch.startswith(("B-", "I-")): p[IOB], p[CHUNK] = ch[:1], ch[2:] # B-NP # Split the role from the relation: # NP-SBJ-1 => relation id is 1 and role is SBJ, # VP-1 => relation id is 1 with no role. # Tokens may be tagged with multiple relations (e.g., NP-OBJ-1*NP-OBJ-3). if p[REL] is not None: ch, p[REL], p[ROLE] = self._parse_relation(p[REL]) # Infer a missing chunk tag from the relation tag (e.g., NP-SBJ-1 => NP). # For PP relation tags (e.g., PP-CLR-1), the first chunk is PP, the following chunks NP. if ch == "PP" \ and self._previous \ and self._previous[REL] == p[REL] \ and self._previous[ROLE] == p[ROLE]: ch = "NP" if p[CHUNK] is None and ch != OUTSIDE: p[CHUNK] = ch self._previous = p # Return the tags in the right order for Sentence.append(). return p[WORD], p[LEMMA], p[POS], p[CHUNK], p[ROLE], p[REL], p[PNP], p[ANCHOR], p[IOB], custom
['def', 'parse_token', '(', 'self', ',', 'token', ',', 'tags', '=', '[', 'WORD', ',', 'POS', ',', 'CHUNK', ',', 'PNP', ',', 'REL', ',', 'ANCHOR', ',', 'LEMMA', ']', ')', ':', 'p', '=', '{', 'WORD', ':', '""', ',', 'POS', ':', 'None', ',', 'IOB', ':', 'None', ',', 'CHUNK', ':', 'None', ',', 'PNP', ':', 'None', ',', 'REL', ':', 'None', ',', 'ROLE', ':', 'None', ',', 'ANCHOR', ':', 'None', ',', 'LEMMA', ':', 'None', '}', '# Split the slash-formatted token into separate tags in the given order.', '# Decode &slash; characters (usually in words and lemmata).', '# Assume None for missing tags (except the word itself, which defaults to an empty string).', 'custom', '=', '{', '}', 'for', 'k', ',', 'v', 'in', 'izip', '(', 'tags', ',', 'token', '.', 'split', '(', '"/"', ')', ')', ':', 'if', 'SLASH0', 'in', 'v', ':', 'v', '=', 'v', '.', 'replace', '(', 'SLASH', ',', '"/"', ')', 'if', 'k', 'not', 'in', 'p', ':', 'custom', '[', 'k', ']', '=', 'None', 'if', 'v', '!=', 'OUTSIDE', 'or', 'k', '==', 'WORD', 'or', 'k', '==', 'LEMMA', ':', '# "type O negative" => "O" != OUTSIDE.', '(', 'p', 'if', 'k', 'not', 'in', 'custom', 'else', 'custom', ')', '[', 'k', ']', '=', 'v', '# Split IOB-prefix from the chunk tag:', '# B- marks the start of a new chunk, ', '# I- marks inside of a chunk.', 'ch', '=', 'p', '[', 'CHUNK', ']', 'if', 'ch', 'is', 'not', 'None', 'and', 'ch', '.', 'startswith', '(', '(', '"B-"', ',', '"I-"', ')', ')', ':', 'p', '[', 'IOB', ']', ',', 'p', '[', 'CHUNK', ']', '=', 'ch', '[', ':', '1', ']', ',', 'ch', '[', '2', ':', ']', '# B-NP', '# Split the role from the relation:', '# NP-SBJ-1 => relation id is 1 and role is SBJ, ', '# VP-1 => relation id is 1 with no role.', '# Tokens may be tagged with multiple relations (e.g., NP-OBJ-1*NP-OBJ-3).', 'if', 'p', '[', 'REL', ']', 'is', 'not', 'None', ':', 'ch', ',', 'p', '[', 'REL', ']', ',', 'p', '[', 'ROLE', ']', '=', 'self', '.', '_parse_relation', '(', 'p', '[', 'REL', ']', ')', '# Infer a missing chunk tag from the relation tag (e.g., NP-SBJ-1 => NP).', '# For PP relation tags (e.g., PP-CLR-1), the first chunk is PP, the following chunks NP.', 'if', 'ch', '==', '"PP"', 'and', 'self', '.', '_previous', 'and', 'self', '.', '_previous', '[', 'REL', ']', '==', 'p', '[', 'REL', ']', 'and', 'self', '.', '_previous', '[', 'ROLE', ']', '==', 'p', '[', 'ROLE', ']', ':', 'ch', '=', '"NP"', 'if', 'p', '[', 'CHUNK', ']', 'is', 'None', 'and', 'ch', '!=', 'OUTSIDE', ':', 'p', '[', 'CHUNK', ']', '=', 'ch', 'self', '.', '_previous', '=', 'p', '# Return the tags in the right order for Sentence.append().', 'return', 'p', '[', 'WORD', ']', ',', 'p', '[', 'LEMMA', ']', ',', 'p', '[', 'POS', ']', ',', 'p', '[', 'CHUNK', ']', ',', 'p', '[', 'ROLE', ']', ',', 'p', '[', 'REL', ']', ',', 'p', '[', 'PNP', ']', ',', 'p', '[', 'ANCHOR', ']', ',', 'p', '[', 'IOB', ']', ',', 'custom']
Returns the arguments for Sentence.append() from a tagged token representation. The order in which token tags appear can be specified. The default order is (separated by slashes): - word, - part-of-speech, - (IOB-)chunk, - (IOB-)preposition, - chunk(-relation)(-role), - anchor, - lemma. Examples: The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat clawed/VBD/B-VP/O/VP-1/A1/claw at/IN/B-PP/B-PNP/PP/P1/at the/DT/B-NP/I-PNP/NP/P1/the sofa/NN/I-NP/I-PNP/NP/P1/sofa ././O/O/O/O/. Returns a (word, lemma, type, chunk, role, relation, preposition, anchor, iob, custom)-tuple, which can be passed to Sentence.append(): Sentence.append(*Sentence.parse_token("cats/NNS/NP")) The custom value is a dictionary of (tag, value)-items of unrecognized tags in the token.
['Returns', 'the', 'arguments', 'for', 'Sentence', '.', 'append', '()', 'from', 'a', 'tagged', 'token', 'representation', '.', 'The', 'order', 'in', 'which', 'token', 'tags', 'appear', 'can', 'be', 'specified', '.', 'The', 'default', 'order', 'is', '(', 'separated', 'by', 'slashes', ')', ':', '-', 'word', '-', 'part', '-', 'of', '-', 'speech', '-', '(', 'IOB', '-', ')', 'chunk', '-', '(', 'IOB', '-', ')', 'preposition', '-', 'chunk', '(', '-', 'relation', ')', '(', '-', 'role', ')', '-', 'anchor', '-', 'lemma', '.', 'Examples', ':', 'The', '/', 'DT', '/', 'B', '-', 'NP', '/', 'O', '/', 'NP', '-', 'SBJ', '-', '1', '/', 'O', '/', 'the', 'cats', '/', 'NNS', '/', 'I', '-', 'NP', '/', 'O', '/', 'NP', '-', 'SBJ', '-', '1', '/', 'O', '/', 'cat', 'clawed', '/', 'VBD', '/', 'B', '-', 'VP', '/', 'O', '/', 'VP', '-', '1', '/', 'A1', '/', 'claw', 'at', '/', 'IN', '/', 'B', '-', 'PP', '/', 'B', '-', 'PNP', '/', 'PP', '/', 'P1', '/', 'at', 'the', '/', 'DT', '/', 'B', '-', 'NP', '/', 'I', '-', 'PNP', '/', 'NP', '/', 'P1', '/', 'the', 'sofa', '/', 'NN', '/', 'I', '-', 'NP', '/', 'I', '-', 'PNP', '/', 'NP', '/', 'P1', '/', 'sofa', '.', '/', '.', '/', 'O', '/', 'O', '/', 'O', '/', 'O', '/', '.', 'Returns', 'a', '(', 'word', 'lemma', 'type', 'chunk', 'role', 'relation', 'preposition', 'anchor', 'iob', 'custom', ')', '-', 'tuple', 'which', 'can', 'be', 'passed', 'to', 'Sentence', '.', 'append', '()', ':', 'Sentence', '.', 'append', '(', '*', 'Sentence', '.', 'parse_token', '(', 'cats', '/', 'NNS', '/', 'NP', '))', 'The', 'custom', 'value', 'is', 'a', 'dictionary', 'of', '(', 'tag', 'value', ')', '-', 'items', 'of', 'unrecognized', 'tags', 'in', 'the', 'token', '.']
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L730-L796
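The docstring describes a slash-separated tag order for each token. A pure-Python illustration of that default order using the docstring's own example; it only splits the string and does not reproduce parse_token()'s IOB, relation, or &slash; handling.

TAGS = ['word', 'part-of-speech', 'chunk', 'preposition', 'relation', 'anchor', 'lemma']
token = 'cats/NNS/I-NP/O/NP-SBJ-1/O/cat'
print(dict(zip(TAGS, token.split('/'))))
# {'word': 'cats', 'part-of-speech': 'NNS', 'chunk': 'I-NP', 'preposition': 'O', ...}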
8,863
what-studio/profiling
profiling/tracing/__init__.py
TracingProfiler.record_leaving
def record_leaving(self, time, code, frame_key, parent_stats):
    """Left from a function call."""
    try:
        stats = parent_stats.get_child(code)
        time_entered = self._times_entered.pop((code, frame_key))
    except KeyError:
        return
    time_elapsed = time - time_entered
    stats.deep_time += max(0, time_elapsed)
python
def record_leaving(self, time, code, frame_key, parent_stats):
    """Left from a function call."""
    try:
        stats = parent_stats.get_child(code)
        time_entered = self._times_entered.pop((code, frame_key))
    except KeyError:
        return
    time_elapsed = time - time_entered
    stats.deep_time += max(0, time_elapsed)
['def', 'record_leaving', '(', 'self', ',', 'time', ',', 'code', ',', 'frame_key', ',', 'parent_stats', ')', ':', 'try', ':', 'stats', '=', 'parent_stats', '.', 'get_child', '(', 'code', ')', 'time_entered', '=', 'self', '.', '_times_entered', '.', 'pop', '(', '(', 'code', ',', 'frame_key', ')', ')', 'except', 'KeyError', ':', 'return', 'time_elapsed', '=', 'time', '-', 'time_entered', 'stats', '.', 'deep_time', '+=', 'max', '(', '0', ',', 'time_elapsed', ')']
Left from a function call.
['Left', 'from', 'a', 'function', 'call', '.']
train
https://github.com/what-studio/profiling/blob/49666ba3ea295eb73782ae6c18a4ec7929d7d8b7/profiling/tracing/__init__.py#L116-L124
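A toy illustration of the enter/leave pairing above: a dict keyed by (code, frame_key) holds the entry timestamp until the matching leave pops it and accumulates the elapsed time. The key values are stand-ins; the real profiler uses actual code objects and frame keys.

import time

times_entered = {}
key = ('fake_code_object', 1)          # stand-ins for a code object and frame key

times_entered[key] = time.time()       # the entering side stores the timestamp
time.sleep(0.01)                       # pretend the call did some work
elapsed = max(0, time.time() - times_entered.pop(key))   # the leaving side, as above
print('deep time contribution: %.4fs' % elapsed)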
8,864
twisted/txaws
txaws/server/resource.py
QueryAPI.get_call_arguments
def get_call_arguments(self, request): """ Get call arguments from a request. Override this if you want to use a wire format different from AWS's. The return value is a dictionary with three keys: 'transport_args', 'handler_args', and 'raw_args'. The value of 'transport_args' must be a dictionary with the following keys: - action - access_key_id - timestamp - expires - version - signature_method - signature - signature_version The value of 'handler_args' should be the application arguments that are meant to be passed to the action handler. The value of 'raw_args', the unprocessed arguments, are used for signature verification. This should be the same dictionary of data that the client used to sign the request. Note that this data must not contain the signature itself. """ params = dict((k, v[-1]) for k, v in request.args.iteritems()) args, rest = self.schema.extract(params) # Get rid of Signature so it doesn't mess with signature verification params.pop("Signature") result = { "transport_args": { "action": args.Action, "access_key_id": args.AWSAccessKeyId, "timestamp": args.Timestamp, "expires": args.Expires, "version": args.Version, "signature_method": args.SignatureMethod, "signature": args.Signature, "signature_version": args.SignatureVersion}, "handler_args": rest, "raw_args": params } return result
python
def get_call_arguments(self, request): """ Get call arguments from a request. Override this if you want to use a wire format different from AWS's. The return value is a dictionary with three keys: 'transport_args', 'handler_args', and 'raw_args'. The value of 'transport_args' must be a dictionary with the following keys: - action - access_key_id - timestamp - expires - version - signature_method - signature - signature_version The value of 'handler_args' should be the application arguments that are meant to be passed to the action handler. The value of 'raw_args', the unprocessed arguments, are used for signature verification. This should be the same dictionary of data that the client used to sign the request. Note that this data must not contain the signature itself. """ params = dict((k, v[-1]) for k, v in request.args.iteritems()) args, rest = self.schema.extract(params) # Get rid of Signature so it doesn't mess with signature verification params.pop("Signature") result = { "transport_args": { "action": args.Action, "access_key_id": args.AWSAccessKeyId, "timestamp": args.Timestamp, "expires": args.Expires, "version": args.Version, "signature_method": args.SignatureMethod, "signature": args.Signature, "signature_version": args.SignatureVersion}, "handler_args": rest, "raw_args": params } return result
['def', 'get_call_arguments', '(', 'self', ',', 'request', ')', ':', 'params', '=', 'dict', '(', '(', 'k', ',', 'v', '[', '-', '1', ']', ')', 'for', 'k', ',', 'v', 'in', 'request', '.', 'args', '.', 'iteritems', '(', ')', ')', 'args', ',', 'rest', '=', 'self', '.', 'schema', '.', 'extract', '(', 'params', ')', "# Get rid of Signature so it doesn't mess with signature verification", 'params', '.', 'pop', '(', '"Signature"', ')', 'result', '=', '{', '"transport_args"', ':', '{', '"action"', ':', 'args', '.', 'Action', ',', '"access_key_id"', ':', 'args', '.', 'AWSAccessKeyId', ',', '"timestamp"', ':', 'args', '.', 'Timestamp', ',', '"expires"', ':', 'args', '.', 'Expires', ',', '"version"', ':', 'args', '.', 'Version', ',', '"signature_method"', ':', 'args', '.', 'SignatureMethod', ',', '"signature"', ':', 'args', '.', 'Signature', ',', '"signature_version"', ':', 'args', '.', 'SignatureVersion', '}', ',', '"handler_args"', ':', 'rest', ',', '"raw_args"', ':', 'params', '}', 'return', 'result']
Get call arguments from a request. Override this if you want to use a wire format different from AWS's. The return value is a dictionary with three keys: 'transport_args', 'handler_args', and 'raw_args'. The value of 'transport_args' must be a dictionary with the following keys: - action - access_key_id - timestamp - expires - version - signature_method - signature - signature_version The value of 'handler_args' should be the application arguments that are meant to be passed to the action handler. The value of 'raw_args', the unprocessed arguments, are used for signature verification. This should be the same dictionary of data that the client used to sign the request. Note that this data must not contain the signature itself.
['Get', 'call', 'arguments', 'from', 'a', 'request', '.', 'Override', 'this', 'if', 'you', 'want', 'to', 'use', 'a', 'wire', 'format', 'different', 'from', 'AWS', 's', '.']
train
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/resource.py#L169-L214
8,865
IDSIA/sacred
sacred/metrics_logger.py
linearize_metrics
def linearize_metrics(logged_metrics):
    """
    Group metrics by name.

    Takes a list of individual measurements, possibly belonging
    to different metrics and groups them by name.

    :param logged_metrics: A list of ScalarMetricLogEntries
    :return: Measured values grouped by the metric name:
    {"metric_name1": {"steps": [0,1,2], "values": [4, 5, 6],
    "timestamps": [datetime, datetime, datetime]},
    "metric_name2": {...}}
    """
    metrics_by_name = {}
    for metric_entry in logged_metrics:
        if metric_entry.name not in metrics_by_name:
            metrics_by_name[metric_entry.name] = {
                "steps": [],
                "values": [],
                "timestamps": [],
                "name": metric_entry.name
            }
        metrics_by_name[metric_entry.name]["steps"] \
            .append(metric_entry.step)
        metrics_by_name[metric_entry.name]["values"] \
            .append(metric_entry.value)
        metrics_by_name[metric_entry.name]["timestamps"] \
            .append(metric_entry.timestamp)
    return metrics_by_name
python
def linearize_metrics(logged_metrics):
    """
    Group metrics by name.

    Takes a list of individual measurements, possibly belonging
    to different metrics and groups them by name.

    :param logged_metrics: A list of ScalarMetricLogEntries
    :return: Measured values grouped by the metric name:
    {"metric_name1": {"steps": [0,1,2], "values": [4, 5, 6],
    "timestamps": [datetime, datetime, datetime]},
    "metric_name2": {...}}
    """
    metrics_by_name = {}
    for metric_entry in logged_metrics:
        if metric_entry.name not in metrics_by_name:
            metrics_by_name[metric_entry.name] = {
                "steps": [],
                "values": [],
                "timestamps": [],
                "name": metric_entry.name
            }
        metrics_by_name[metric_entry.name]["steps"] \
            .append(metric_entry.step)
        metrics_by_name[metric_entry.name]["values"] \
            .append(metric_entry.value)
        metrics_by_name[metric_entry.name]["timestamps"] \
            .append(metric_entry.timestamp)
    return metrics_by_name
['def', 'linearize_metrics', '(', 'logged_metrics', ')', ':', 'metrics_by_name', '=', '{', '}', 'for', 'metric_entry', 'in', 'logged_metrics', ':', 'if', 'metric_entry', '.', 'name', 'not', 'in', 'metrics_by_name', ':', 'metrics_by_name', '[', 'metric_entry', '.', 'name', ']', '=', '{', '"steps"', ':', '[', ']', ',', '"values"', ':', '[', ']', ',', '"timestamps"', ':', '[', ']', ',', '"name"', ':', 'metric_entry', '.', 'name', '}', 'metrics_by_name', '[', 'metric_entry', '.', 'name', ']', '[', '"steps"', ']', '.', 'append', '(', 'metric_entry', '.', 'step', ')', 'metrics_by_name', '[', 'metric_entry', '.', 'name', ']', '[', '"values"', ']', '.', 'append', '(', 'metric_entry', '.', 'value', ')', 'metrics_by_name', '[', 'metric_entry', '.', 'name', ']', '[', '"timestamps"', ']', '.', 'append', '(', 'metric_entry', '.', 'timestamp', ')', 'return', 'metrics_by_name']
Group metrics by name. Takes a list of individual measurements, possibly belonging to different metrics and groups them by name. :param logged_metrics: A list of ScalarMetricLogEntries :return: Measured values grouped by the metric name: {"metric_name1": {"steps": [0,1,2], "values": [4, 5, 6], "timestamps": [datetime, datetime, datetime]}, "metric_name2": {...}}
['Group', 'metrics', 'by', 'name', '.']
train
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/metrics_logger.py#L85-L113
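A usage sketch of the grouping behaviour with stand-in entries; the namedtuple below is only a substitute for ScalarMetricLogEntry (whose real constructor is not shown in this record), and it assumes linearize_metrics from the record above is in scope.

from collections import namedtuple
from datetime import datetime

Entry = namedtuple('Entry', ['name', 'step', 'value', 'timestamp'])
logged = [
    Entry('loss', 0, 4, datetime.utcnow()),
    Entry('loss', 1, 5, datetime.utcnow()),
    Entry('accuracy', 0, 0.7, datetime.utcnow()),
]
print(linearize_metrics(logged))
# {'loss': {'steps': [0, 1], 'values': [4, 5], ...}, 'accuracy': {...}}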
8,866
vtkiorg/vtki
vtki/plotting.py
BasePlotter.image
def image(self):
    """ Returns an image array of current render window """
    if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'):
        return self.last_image
    ifilter = vtk.vtkWindowToImageFilter()
    ifilter.SetInput(self.ren_win)
    ifilter.ReadFrontBufferOff()
    if self.image_transparent_background:
        ifilter.SetInputBufferTypeToRGBA()
    else:
        ifilter.SetInputBufferTypeToRGB()
    return self._run_image_filter(ifilter)
python
def image(self): """ Returns an image array of current render window """ if not hasattr(self, 'ren_win') and hasattr(self, 'last_image'): return self.last_image ifilter = vtk.vtkWindowToImageFilter() ifilter.SetInput(self.ren_win) ifilter.ReadFrontBufferOff() if self.image_transparent_background: ifilter.SetInputBufferTypeToRGBA() else: ifilter.SetInputBufferTypeToRGB() return self._run_image_filter(ifilter)
['def', 'image', '(', 'self', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', "'ren_win'", ')', 'and', 'hasattr', '(', 'self', ',', "'last_image'", ')', ':', 'return', 'self', '.', 'last_image', 'ifilter', '=', 'vtk', '.', 'vtkWindowToImageFilter', '(', ')', 'ifilter', '.', 'SetInput', '(', 'self', '.', 'ren_win', ')', 'ifilter', '.', 'ReadFrontBufferOff', '(', ')', 'if', 'self', '.', 'image_transparent_background', ':', 'ifilter', '.', 'SetInputBufferTypeToRGBA', '(', ')', 'else', ':', 'ifilter', '.', 'SetInputBufferTypeToRGB', '(', ')', 'return', 'self', '.', '_run_image_filter', '(', 'ifilter', ')']
Returns an image array of current render window
['Returns', 'an', 'image', 'array', 'of', 'current', 'render', 'window']
train
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L2060-L2071
8,867
fredericklussier/ObservablePy
observablePy/ObservableStore.py
ObservableStore.isObservableElement
def isObservableElement(self, elementName): """ Mention if an element is an observable element. :param str ElementName: the element name to evaluate :return: true if is an observable element, otherwise false. :rtype: bool """ if not(isinstance(elementName, str)): raise TypeError( "Element name should be a string ." + "I receive this {0}" .format(elementName)) return (True if (elementName == "*") else self._evaluateString(elementName))
python
def isObservableElement(self, elementName): """ Mention if an element is an observable element. :param str ElementName: the element name to evaluate :return: true if is an observable element, otherwise false. :rtype: bool """ if not(isinstance(elementName, str)): raise TypeError( "Element name should be a string ." + "I receive this {0}" .format(elementName)) return (True if (elementName == "*") else self._evaluateString(elementName))
['def', 'isObservableElement', '(', 'self', ',', 'elementName', ')', ':', 'if', 'not', '(', 'isinstance', '(', 'elementName', ',', 'str', ')', ')', ':', 'raise', 'TypeError', '(', '"Element name should be a string ."', '+', '"I receive this {0}"', '.', 'format', '(', 'elementName', ')', ')', 'return', '(', 'True', 'if', '(', 'elementName', '==', '"*"', ')', 'else', 'self', '.', '_evaluateString', '(', 'elementName', ')', ')']
Mention if an element is an observable element. :param str ElementName: the element name to evaluate :return: true if is an observable element, otherwise false. :rtype: bool
['Mention', 'if', 'an', 'element', 'is', 'an', 'observable', 'element', '.']
train
https://github.com/fredericklussier/ObservablePy/blob/fd7926a0568621f80b1d567d18f199976f1fa4e8/observablePy/ObservableStore.py#L43-L58
8,868
heroku/heroku.py
heroku/models.py
App.processes
def processes(self): """The proccesses for this app.""" return self._h._get_resources( resource=('apps', self.name, 'ps'), obj=Process, app=self, map=ProcessListResource )
python
def processes(self): """The proccesses for this app.""" return self._h._get_resources( resource=('apps', self.name, 'ps'), obj=Process, app=self, map=ProcessListResource )
['def', 'processes', '(', 'self', ')', ':', 'return', 'self', '.', '_h', '.', '_get_resources', '(', 'resource', '=', '(', "'apps'", ',', 'self', '.', 'name', ',', "'ps'", ')', ',', 'obj', '=', 'Process', ',', 'app', '=', 'self', ',', 'map', '=', 'ProcessListResource', ')']
The proccesses for this app.
['The', 'proccesses', 'for', 'this', 'app', '.']
train
https://github.com/heroku/heroku.py/blob/cadc0a074896cf29c65a457c5c5bdb2069470af0/heroku/models.py#L238-L243
8,869
inasafe/inasafe
safe/gui/widgets/dock.py
Dock._validate_question_area
def _validate_question_area(self): """Helper method to evaluate the current state of the dialog. This function will determine if it is appropriate for the OK button to be enabled or not. .. note:: The enabled state of the OK button on the dialog will NOT be updated (set True or False) depending on the outcome of the UI readiness tests performed - **only** True or False will be returned by the function. :returns: A two-tuple where the first element is a Boolean reflecting the results of the validation tests and the second is a message indicating any reason why the validation may have failed. :rtype: (Boolean, safe.messaging.Message) Example:: flag,message = self._validate_question_area() """ hazard_index = self.hazard_layer_combo.currentIndex() exposure_index = self.exposure_layer_combo.currentIndex() if hazard_index == -1 or exposure_index == -1: if self.conflicting_plugin_detected: message = conflicting_plugin_message() else: message = getting_started_message() return False, message else: return True, None
python
def _validate_question_area(self): """Helper method to evaluate the current state of the dialog. This function will determine if it is appropriate for the OK button to be enabled or not. .. note:: The enabled state of the OK button on the dialog will NOT be updated (set True or False) depending on the outcome of the UI readiness tests performed - **only** True or False will be returned by the function. :returns: A two-tuple where the first element is a Boolean reflecting the results of the validation tests and the second is a message indicating any reason why the validation may have failed. :rtype: (Boolean, safe.messaging.Message) Example:: flag,message = self._validate_question_area() """ hazard_index = self.hazard_layer_combo.currentIndex() exposure_index = self.exposure_layer_combo.currentIndex() if hazard_index == -1 or exposure_index == -1: if self.conflicting_plugin_detected: message = conflicting_plugin_message() else: message = getting_started_message() return False, message else: return True, None
['def', '_validate_question_area', '(', 'self', ')', ':', 'hazard_index', '=', 'self', '.', 'hazard_layer_combo', '.', 'currentIndex', '(', ')', 'exposure_index', '=', 'self', '.', 'exposure_layer_combo', '.', 'currentIndex', '(', ')', 'if', 'hazard_index', '==', '-', '1', 'or', 'exposure_index', '==', '-', '1', ':', 'if', 'self', '.', 'conflicting_plugin_detected', ':', 'message', '=', 'conflicting_plugin_message', '(', ')', 'else', ':', 'message', '=', 'getting_started_message', '(', ')', 'return', 'False', ',', 'message', 'else', ':', 'return', 'True', ',', 'None']
Helper method to evaluate the current state of the dialog. This function will determine if it is appropriate for the OK button to be enabled or not. .. note:: The enabled state of the OK button on the dialog will NOT be updated (set True or False) depending on the outcome of the UI readiness tests performed - **only** True or False will be returned by the function. :returns: A two-tuple where the first element is a Boolean reflecting the results of the validation tests and the second is a message indicating any reason why the validation may have failed. :rtype: (Boolean, safe.messaging.Message) Example:: flag,message = self._validate_question_area()
['Helper', 'method', 'to', 'evaluate', 'the', 'current', 'state', 'of', 'the', 'dialog', '.']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/widgets/dock.py#L1564-L1593
8,870
switchboardpy/switchboard
switchboard/models.py
Model.update
def update(cls, spec, updates, upsert=False): ''' The spec is used to search for the data to update, updates contains the values to be updated, and upsert specifies whether to do an insert if the original data is not found. ''' if 'key' in spec: previous = cls.get(spec['key']) else: previous = None if previous: # Update existing data. current = cls(**previous.__dict__) elif upsert: # Create new data. current = cls(**spec) else: current = None # XXX Should there be any error thrown if this is a noop? if current: current.__dict__.update(updates) current.save() return current
python
def update(cls, spec, updates, upsert=False): ''' The spec is used to search for the data to update, updates contains the values to be updated, and upsert specifies whether to do an insert if the original data is not found. ''' if 'key' in spec: previous = cls.get(spec['key']) else: previous = None if previous: # Update existing data. current = cls(**previous.__dict__) elif upsert: # Create new data. current = cls(**spec) else: current = None # XXX Should there be any error thrown if this is a noop? if current: current.__dict__.update(updates) current.save() return current
['def', 'update', '(', 'cls', ',', 'spec', ',', 'updates', ',', 'upsert', '=', 'False', ')', ':', 'if', "'key'", 'in', 'spec', ':', 'previous', '=', 'cls', '.', 'get', '(', 'spec', '[', "'key'", ']', ')', 'else', ':', 'previous', '=', 'None', 'if', 'previous', ':', '# Update existing data.', 'current', '=', 'cls', '(', '*', '*', 'previous', '.', '__dict__', ')', 'elif', 'upsert', ':', '# Create new data.', 'current', '=', 'cls', '(', '*', '*', 'spec', ')', 'else', ':', 'current', '=', 'None', '# XXX Should there be any error thrown if this is a noop?', 'if', 'current', ':', 'current', '.', '__dict__', '.', 'update', '(', 'updates', ')', 'current', '.', 'save', '(', ')', 'return', 'current']
The spec is used to search for the data to update, updates contains the values to be updated, and upsert specifies whether to do an insert if the original data is not found.
['The', 'spec', 'is', 'used', 'to', 'search', 'for', 'the', 'data', 'to', 'update', 'updates', 'contains', 'the', 'values', 'to', 'be', 'updated', 'and', 'upsert', 'specifies', 'whether', 'to', 'do', 'an', 'insert', 'if', 'the', 'original', 'data', 'is', 'not', 'found', '.']
train
https://github.com/switchboardpy/switchboard/blob/074b4838dbe140cb8f89d3c25ae25e70a29f9553/switchboard/models.py#L124-L146
8,871
proycon/flat
flat/modes/viewer/views.py
pub_poll
def pub_poll(request, docid): """The initial viewer, does not provide the document content yet""" try: r = flat.comm.get(request, '/poll/pub/' + docid + '/', False) except URLError: return HttpResponseForbidden("Unable to connect to the document server [viewer/poll]") return HttpResponse(r, content_type='application/json')
python
def pub_poll(request, docid): """The initial viewer, does not provide the document content yet""" try: r = flat.comm.get(request, '/poll/pub/' + docid + '/', False) except URLError: return HttpResponseForbidden("Unable to connect to the document server [viewer/poll]") return HttpResponse(r, content_type='application/json')
['def', 'pub_poll', '(', 'request', ',', 'docid', ')', ':', 'try', ':', 'r', '=', 'flat', '.', 'comm', '.', 'get', '(', 'request', ',', "'/poll/pub/'", '+', 'docid', '+', "'/'", ',', 'False', ')', 'except', 'URLError', ':', 'return', 'HttpResponseForbidden', '(', '"Unable to connect to the document server [viewer/poll]"', ')', 'return', 'HttpResponse', '(', 'r', ',', 'content_type', '=', "'application/json'", ')']
The initial viewer, does not provide the document content yet
['The', 'initial', 'viewer', 'does', 'not', 'provide', 'the', 'document', 'content', 'yet']
train
https://github.com/proycon/flat/blob/f14eea61edcae8656dadccd9a43481ff7e710ffb/flat/modes/viewer/views.py#L37-L43
8,872
pinterest/pymemcache
pymemcache/client/base.py
Client.prepend
def prepend(self, key, value, expire=0, noreply=None): """ The memcached "prepend" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True. """ if noreply is None: noreply = self.default_noreply return self._store_cmd(b'prepend', {key: value}, expire, noreply)[key]
python
def prepend(self, key, value, expire=0, noreply=None): """ The memcached "prepend" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True. """ if noreply is None: noreply = self.default_noreply return self._store_cmd(b'prepend', {key: value}, expire, noreply)[key]
['def', 'prepend', '(', 'self', ',', 'key', ',', 'value', ',', 'expire', '=', '0', ',', 'noreply', '=', 'None', ')', ':', 'if', 'noreply', 'is', 'None', ':', 'noreply', '=', 'self', '.', 'default_noreply', 'return', 'self', '.', '_store_cmd', '(', "b'prepend'", ',', '{', 'key', ':', 'value', '}', ',', 'expire', ',', 'noreply', ')', '[', 'key', ']']
The memcached "prepend" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True.
['The', 'memcached', 'prepend', 'command', '.']
train
https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L401-L418
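A short usage sketch for Client.prepend from the record above; it assumes pymemcache is installed and a memcached server is listening on localhost:11211.

    from pymemcache.client.base import Client

    client = Client(("localhost", 11211))    # assumed local memcached instance
    client.set("greeting", "world")
    client.prepend("greeting", "hello ")     # the memcached "prepend" command
    print(client.get("greeting"))            # b'hello world'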
8,873
novopl/peltak
src/peltak/extra/gitflow/logic/feature.py
start
def start(name): # type: (str) -> None """ Start working on a new feature by branching off develop. This will create a new branch off develop called feature/<name>. Args: name (str): The name of the new feature. """ feature_name = 'feature/' + common.to_branch_name(name) develop = conf.get('git.devel_branch', 'develop') common.assert_on_branch(develop) common.git_checkout(feature_name, create=True)
python
def start(name): # type: (str) -> None """ Start working on a new feature by branching off develop. This will create a new branch off develop called feature/<name>. Args: name (str): The name of the new feature. """ feature_name = 'feature/' + common.to_branch_name(name) develop = conf.get('git.devel_branch', 'develop') common.assert_on_branch(develop) common.git_checkout(feature_name, create=True)
['def', 'start', '(', 'name', ')', ':', '# type: (str) -> None', 'feature_name', '=', "'feature/'", '+', 'common', '.', 'to_branch_name', '(', 'name', ')', 'develop', '=', 'conf', '.', 'get', '(', "'git.devel_branch'", ',', "'develop'", ')', 'common', '.', 'assert_on_branch', '(', 'develop', ')', 'common', '.', 'git_checkout', '(', 'feature_name', ',', 'create', '=', 'True', ')']
Start working on a new feature by branching off develop. This will create a new branch off develop called feature/<name>. Args: name (str): The name of the new feature.
['Start', 'working', 'on', 'a', 'new', 'feature', 'by', 'branching', 'off', 'develop', '.']
train
https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/gitflow/logic/feature.py#L30-L44
8,874
twilio/twilio-python
twilio/rest/proxy/v1/service/__init__.py
ServiceInstance.update
def update(self, unique_name=values.unset, default_ttl=values.unset, callback_url=values.unset, geo_match_level=values.unset, number_selection_behavior=values.unset, intercept_callback_url=values.unset, out_of_session_callback_url=values.unset, chat_instance_sid=values.unset): """ Update the ServiceInstance :param unicode unique_name: An application-defined string that uniquely identifies the resource :param unicode default_ttl: Default TTL for a Session, in seconds :param unicode callback_url: The URL we should call when the interaction status changes :param ServiceInstance.GeoMatchLevel geo_match_level: Where a proxy number must be located relative to the participant identifier :param ServiceInstance.NumberSelectionBehavior number_selection_behavior: The preference for Proxy Number selection for the Service instance :param unicode intercept_callback_url: The URL we call on each interaction :param unicode out_of_session_callback_url: The URL we call when an inbound call or SMS action occurs on a closed or non-existent Session :param unicode chat_instance_sid: The SID of the Chat Service Instance :returns: Updated ServiceInstance :rtype: twilio.rest.proxy.v1.service.ServiceInstance """ return self._proxy.update( unique_name=unique_name, default_ttl=default_ttl, callback_url=callback_url, geo_match_level=geo_match_level, number_selection_behavior=number_selection_behavior, intercept_callback_url=intercept_callback_url, out_of_session_callback_url=out_of_session_callback_url, chat_instance_sid=chat_instance_sid, )
python
def update(self, unique_name=values.unset, default_ttl=values.unset, callback_url=values.unset, geo_match_level=values.unset, number_selection_behavior=values.unset, intercept_callback_url=values.unset, out_of_session_callback_url=values.unset, chat_instance_sid=values.unset): """ Update the ServiceInstance :param unicode unique_name: An application-defined string that uniquely identifies the resource :param unicode default_ttl: Default TTL for a Session, in seconds :param unicode callback_url: The URL we should call when the interaction status changes :param ServiceInstance.GeoMatchLevel geo_match_level: Where a proxy number must be located relative to the participant identifier :param ServiceInstance.NumberSelectionBehavior number_selection_behavior: The preference for Proxy Number selection for the Service instance :param unicode intercept_callback_url: The URL we call on each interaction :param unicode out_of_session_callback_url: The URL we call when an inbound call or SMS action occurs on a closed or non-existent Session :param unicode chat_instance_sid: The SID of the Chat Service Instance :returns: Updated ServiceInstance :rtype: twilio.rest.proxy.v1.service.ServiceInstance """ return self._proxy.update( unique_name=unique_name, default_ttl=default_ttl, callback_url=callback_url, geo_match_level=geo_match_level, number_selection_behavior=number_selection_behavior, intercept_callback_url=intercept_callback_url, out_of_session_callback_url=out_of_session_callback_url, chat_instance_sid=chat_instance_sid, )
['def', 'update', '(', 'self', ',', 'unique_name', '=', 'values', '.', 'unset', ',', 'default_ttl', '=', 'values', '.', 'unset', ',', 'callback_url', '=', 'values', '.', 'unset', ',', 'geo_match_level', '=', 'values', '.', 'unset', ',', 'number_selection_behavior', '=', 'values', '.', 'unset', ',', 'intercept_callback_url', '=', 'values', '.', 'unset', ',', 'out_of_session_callback_url', '=', 'values', '.', 'unset', ',', 'chat_instance_sid', '=', 'values', '.', 'unset', ')', ':', 'return', 'self', '.', '_proxy', '.', 'update', '(', 'unique_name', '=', 'unique_name', ',', 'default_ttl', '=', 'default_ttl', ',', 'callback_url', '=', 'callback_url', ',', 'geo_match_level', '=', 'geo_match_level', ',', 'number_selection_behavior', '=', 'number_selection_behavior', ',', 'intercept_callback_url', '=', 'intercept_callback_url', ',', 'out_of_session_callback_url', '=', 'out_of_session_callback_url', ',', 'chat_instance_sid', '=', 'chat_instance_sid', ',', ')']
Update the ServiceInstance :param unicode unique_name: An application-defined string that uniquely identifies the resource :param unicode default_ttl: Default TTL for a Session, in seconds :param unicode callback_url: The URL we should call when the interaction status changes :param ServiceInstance.GeoMatchLevel geo_match_level: Where a proxy number must be located relative to the participant identifier :param ServiceInstance.NumberSelectionBehavior number_selection_behavior: The preference for Proxy Number selection for the Service instance :param unicode intercept_callback_url: The URL we call on each interaction :param unicode out_of_session_callback_url: The URL we call when an inbound call or SMS action occurs on a closed or non-existent Session :param unicode chat_instance_sid: The SID of the Chat Service Instance :returns: Updated ServiceInstance :rtype: twilio.rest.proxy.v1.service.ServiceInstance
['Update', 'the', 'ServiceInstance']
train
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/proxy/v1/service/__init__.py#L558-L588
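A hedged sketch of calling ServiceInstance.update through the twilio-python client; the account SID, auth token, and Proxy service SID below are placeholders, not real identifiers.

    from twilio.rest import Client

    # Placeholder credentials and service SID (assumptions for illustration only).
    client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
    service = client.proxy.services("KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(
        unique_name="support-proxy",
        default_ttl=3600,
        callback_url="https://example.com/proxy/status",
    )
    print(service.unique_name)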
8,875
phac-nml/sistr_cmd
sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py
min_row_dist_sum_idx
def min_row_dist_sum_idx(dists): """Find the index of the row with the minimum row distance sum This should return the index of the row index with the least distance overall to all other rows. Args: dists (np.array): must be square distance matrix Returns: int: index of row with min dist row sum """ row_sums = np.apply_along_axis(arr=dists, axis=0, func1d=np.sum) return row_sums.argmin()
python
def min_row_dist_sum_idx(dists): """Find the index of the row with the minimum row distance sum This should return the index of the row index with the least distance overall to all other rows. Args: dists (np.array): must be square distance matrix Returns: int: index of row with min dist row sum """ row_sums = np.apply_along_axis(arr=dists, axis=0, func1d=np.sum) return row_sums.argmin()
['def', 'min_row_dist_sum_idx', '(', 'dists', ')', ':', 'row_sums', '=', 'np', '.', 'apply_along_axis', '(', 'arr', '=', 'dists', ',', 'axis', '=', '0', ',', 'func1d', '=', 'np', '.', 'sum', ')', 'return', 'row_sums', '.', 'argmin', '(', ')']
Find the index of the row with the minimum row distance sum This should return the index of the row index with the least distance overall to all other rows. Args: dists (np.array): must be square distance matrix Returns: int: index of row with min dist row sum
['Find', 'the', 'index', 'of', 'the', 'row', 'with', 'the', 'minimum', 'row', 'distance', 'sum']
train
https://github.com/phac-nml/sistr_cmd/blob/4630fae72439723b354a94b94fbe76ad2f9f6295/sistr/src/cgmlst/extras/centroid_cgmlst_alleles.py#L84-L97
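A small numpy example for min_row_dist_sum_idx from the record above. For a symmetric pairwise distance matrix the column sums it computes equal the row sums, so the returned index is the row closest overall to all others (a medoid-style pick).

    import numpy as np

    dists = np.array([
        [0.0, 1.0, 4.0],
        [1.0, 0.0, 2.0],
        [4.0, 2.0, 0.0],
    ])
    print(min_row_dist_sum_idx(dists))  # 1, since the sums are 5.0, 3.0 and 6.0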
8,876
seleniumbase/SeleniumBase
seleniumbase/fixtures/base_case.py
BaseCase.assert_link_text
def assert_link_text(self, link_text, timeout=settings.SMALL_TIMEOUT): """ Similar to wait_for_link_text_visible(), but returns nothing. As above, will raise an exception if nothing can be found. Returns True if successful. Default timeout = SMALL_TIMEOUT. """ if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT: timeout = self.__get_new_timeout(timeout) self.wait_for_link_text_visible(link_text, timeout=timeout) if self.demo_mode: messenger_post = ("ASSERT LINK TEXT {%s}." % link_text) self.__highlight_with_assert_success( messenger_post, link_text, by=By.LINK_TEXT) return True
python
def assert_link_text(self, link_text, timeout=settings.SMALL_TIMEOUT): """ Similar to wait_for_link_text_visible(), but returns nothing. As above, will raise an exception if nothing can be found. Returns True if successful. Default timeout = SMALL_TIMEOUT. """ if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT: timeout = self.__get_new_timeout(timeout) self.wait_for_link_text_visible(link_text, timeout=timeout) if self.demo_mode: messenger_post = ("ASSERT LINK TEXT {%s}." % link_text) self.__highlight_with_assert_success( messenger_post, link_text, by=By.LINK_TEXT) return True
['def', 'assert_link_text', '(', 'self', ',', 'link_text', ',', 'timeout', '=', 'settings', '.', 'SMALL_TIMEOUT', ')', ':', 'if', 'self', '.', 'timeout_multiplier', 'and', 'timeout', '==', 'settings', '.', 'SMALL_TIMEOUT', ':', 'timeout', '=', 'self', '.', '__get_new_timeout', '(', 'timeout', ')', 'self', '.', 'wait_for_link_text_visible', '(', 'link_text', ',', 'timeout', '=', 'timeout', ')', 'if', 'self', '.', 'demo_mode', ':', 'messenger_post', '=', '(', '"ASSERT LINK TEXT {%s}."', '%', 'link_text', ')', 'self', '.', '__highlight_with_assert_success', '(', 'messenger_post', ',', 'link_text', ',', 'by', '=', 'By', '.', 'LINK_TEXT', ')', 'return', 'True']
Similar to wait_for_link_text_visible(), but returns nothing. As above, will raise an exception if nothing can be found. Returns True if successful. Default timeout = SMALL_TIMEOUT.
['Similar', 'to', 'wait_for_link_text_visible', '()', 'but', 'returns', 'nothing', '.', 'As', 'above', 'will', 'raise', 'an', 'exception', 'if', 'nothing', 'can', 'be', 'found', '.', 'Returns', 'True', 'if', 'successful', '.', 'Default', 'timeout', '=', 'SMALL_TIMEOUT', '.']
train
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L2319-L2330
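A brief SeleniumBase test sketch showing assert_link_text from the record above in context; the URL and link text are illustrative assumptions about the page under test.

    from seleniumbase import BaseCase

    class LinkTextTest(BaseCase):
        def test_assert_link_text(self):
            self.open("https://example.com/")             # page assumed to exist
            self.assert_link_text("More information...")  # raises if the link text is not visible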
8,877
nameko/nameko
nameko/messaging.py
QueueConsumer.get_consumers
def get_consumers(self, consumer_cls, channel): """ Kombu callback to set up consumers. Called after any (re)connection to the broker. """ _log.debug('setting up consumers %s', self) for provider in self._providers: callbacks = [partial(self.handle_message, provider)] consumer = consumer_cls( queues=[provider.queue], callbacks=callbacks, accept=self.accept ) consumer.qos(prefetch_count=self.prefetch_count) self._consumers[provider] = consumer return self._consumers.values()
python
def get_consumers(self, consumer_cls, channel): """ Kombu callback to set up consumers. Called after any (re)connection to the broker. """ _log.debug('setting up consumers %s', self) for provider in self._providers: callbacks = [partial(self.handle_message, provider)] consumer = consumer_cls( queues=[provider.queue], callbacks=callbacks, accept=self.accept ) consumer.qos(prefetch_count=self.prefetch_count) self._consumers[provider] = consumer return self._consumers.values()
['def', 'get_consumers', '(', 'self', ',', 'consumer_cls', ',', 'channel', ')', ':', '_log', '.', 'debug', '(', "'setting up consumers %s'", ',', 'self', ')', 'for', 'provider', 'in', 'self', '.', '_providers', ':', 'callbacks', '=', '[', 'partial', '(', 'self', '.', 'handle_message', ',', 'provider', ')', ']', 'consumer', '=', 'consumer_cls', '(', 'queues', '=', '[', 'provider', '.', 'queue', ']', ',', 'callbacks', '=', 'callbacks', ',', 'accept', '=', 'self', '.', 'accept', ')', 'consumer', '.', 'qos', '(', 'prefetch_count', '=', 'self', '.', 'prefetch_count', ')', 'self', '.', '_consumers', '[', 'provider', ']', '=', 'consumer', 'return', 'self', '.', '_consumers', '.', 'values', '(', ')']
Kombu callback to set up consumers. Called after any (re)connection to the broker.
['Kombu', 'callback', 'to', 'set', 'up', 'consumers', '.']
train
https://github.com/nameko/nameko/blob/88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d/nameko/messaging.py#L369-L388
8,878
PmagPy/PmagPy
SPD/lib/lib_ptrm_statistics.py
get_delta_pal
def get_delta_pal(b, b_star): """ input: b, b_star (actual and corrected slope) output: delta_pal """ delta_pal = numpy.abs(old_div((b - b_star), b)) * 100 return delta_pal
python
def get_delta_pal(b, b_star): """ input: b, b_star (actual and corrected slope) output: delta_pal """ delta_pal = numpy.abs(old_div((b - b_star), b)) * 100 return delta_pal
['def', 'get_delta_pal', '(', 'b', ',', 'b_star', ')', ':', 'delta_pal', '=', 'numpy', '.', 'abs', '(', 'old_div', '(', '(', 'b', '-', 'b_star', ')', ',', 'b', ')', ')', '*', '100', 'return', 'delta_pal']
input: b, b_star (actual and corrected slope) output: delta_pal
['input', ':', 'b', 'b_star', '(', 'actual', 'and', 'corrected', 'slope', ')', 'output', ':', 'delta_pal']
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_ptrm_statistics.py#L215-L221
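A worked example of the delta_pal formula from the record above, |(b - b_star) / b| * 100, using plain Python arithmetic with illustrative slope values.

    b, b_star = 2.0, 1.8                     # actual and corrected slope (made-up values)
    delta_pal = abs((b - b_star) / b) * 100
    print(delta_pal)                         # ~10.0 percent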
8,879
ttroy50/pyephember
pyephember/pyephember.py
EphEmber.set_target_temperature_by_id
def set_target_temperature_by_id(self, zone_id, target_temperature): """ Set the target temperature for a zone by id """ if not self._do_auth(): raise RuntimeError("Unable to login") data = { "ZoneId": zone_id, "TargetTemperature": target_temperature } headers = { "Accept": "application/json", "Content-Type": "application/json", 'Authorization': 'Bearer ' + self.login_data['token']['accessToken'] } url = self.api_base_url + "Home/ZoneTargetTemperature" response = requests.post(url, data=json.dumps( data), headers=headers, timeout=10) if response.status_code != 200: return False zone_change_data = response.json() return zone_change_data.get("isSuccess", False)
python
def set_target_temperature_by_id(self, zone_id, target_temperature): """ Set the target temperature for a zone by id """ if not self._do_auth(): raise RuntimeError("Unable to login") data = { "ZoneId": zone_id, "TargetTemperature": target_temperature } headers = { "Accept": "application/json", "Content-Type": "application/json", 'Authorization': 'Bearer ' + self.login_data['token']['accessToken'] } url = self.api_base_url + "Home/ZoneTargetTemperature" response = requests.post(url, data=json.dumps( data), headers=headers, timeout=10) if response.status_code != 200: return False zone_change_data = response.json() return zone_change_data.get("isSuccess", False)
['def', 'set_target_temperature_by_id', '(', 'self', ',', 'zone_id', ',', 'target_temperature', ')', ':', 'if', 'not', 'self', '.', '_do_auth', '(', ')', ':', 'raise', 'RuntimeError', '(', '"Unable to login"', ')', 'data', '=', '{', '"ZoneId"', ':', 'zone_id', ',', '"TargetTemperature"', ':', 'target_temperature', '}', 'headers', '=', '{', '"Accept"', ':', '"application/json"', ',', '"Content-Type"', ':', '"application/json"', ',', "'Authorization'", ':', "'Bearer '", '+', 'self', '.', 'login_data', '[', "'token'", ']', '[', "'accessToken'", ']', '}', 'url', '=', 'self', '.', 'api_base_url', '+', '"Home/ZoneTargetTemperature"', 'response', '=', 'requests', '.', 'post', '(', 'url', ',', 'data', '=', 'json', '.', 'dumps', '(', 'data', ')', ',', 'headers', '=', 'headers', ',', 'timeout', '=', '10', ')', 'if', 'response', '.', 'status_code', '!=', '200', ':', 'return', 'False', 'zone_change_data', '=', 'response', '.', 'json', '(', ')', 'return', 'zone_change_data', '.', 'get', '(', '"isSuccess"', ',', 'False', ')']
Set the target temperature for a zone by id
['Set', 'the', 'target', 'temperature', 'for', 'a', 'zone', 'by', 'id']
train
https://github.com/ttroy50/pyephember/blob/3ee159ee82b926b957dae8dcbc7a4bfb6807a9b4/pyephember/pyephember.py#L243-L272
8,880
projectshift/shift-boiler
boiler/user/models.py
User.verify_password
def verify_password(self, password): """ Verify a given string for being valid password """ if self.password is None: return False from boiler.user.util.passlib import passlib_context return passlib_context.verify(str(password), self.password)
python
def verify_password(self, password): """ Verify a given string for being valid password """ if self.password is None: return False from boiler.user.util.passlib import passlib_context return passlib_context.verify(str(password), self.password)
['def', 'verify_password', '(', 'self', ',', 'password', ')', ':', 'if', 'self', '.', 'password', 'is', 'None', ':', 'return', 'False', 'from', 'boiler', '.', 'user', '.', 'util', '.', 'passlib', 'import', 'passlib_context', 'return', 'passlib_context', '.', 'verify', '(', 'str', '(', 'password', ')', ',', 'self', '.', 'password', ')']
Verify a given string for being valid password
['Verify', 'a', 'given', 'string', 'for', 'being', 'valid', 'password']
train
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/models.py#L332-L338
8,881
pnegahdar/inenv
inenv/inenv.py
toggle_autojump
def toggle_autojump(): """Toggles Autojump""" if not autojump_enabled(): with open(AUTOJUMP_FILE, 'w+') as ajfile: ajfile.write("enabled") else: os.remove(AUTOJUMP_FILE)
python
def toggle_autojump(): """Toggles Autojump""" if not autojump_enabled(): with open(AUTOJUMP_FILE, 'w+') as ajfile: ajfile.write("enabled") else: os.remove(AUTOJUMP_FILE)
['def', 'toggle_autojump', '(', ')', ':', 'if', 'not', 'autojump_enabled', '(', ')', ':', 'with', 'open', '(', 'AUTOJUMP_FILE', ',', "'w+'", ')', 'as', 'ajfile', ':', 'ajfile', '.', 'write', '(', '"enabled"', ')', 'else', ':', 'os', '.', 'remove', '(', 'AUTOJUMP_FILE', ')']
Toggles Autojump
['Toggles', 'Autojump']
train
https://github.com/pnegahdar/inenv/blob/8f484e520892bf9eb59f91b4b5c92df9fbd9a4e6/inenv/inenv.py#L228-L234
8,882
tanghaibao/jcvi
jcvi/utils/range.py
range_distance
def range_distance(a, b, distmode='ss'): """ Returns the distance between two ranges. distmode is ss, se, es, ee and sets the place on read one and two to measure the distance (s = start, e = end) >>> range_distance(("1", 30, 45, '+'), ("1", 45, 55, '+')) (26, '++') >>> range_distance(("1", 30, 45, '-'), ("1", 57, 68, '-')) (39, '--') >>> range_distance(("1", 30, 42, '-'), ("1", 45, 55, '+')) (26, '-+') >>> range_distance(("1", 30, 42, '+'), ("1", 45, 55, '-'), distmode='ee') (2, '+-') """ assert distmode in ('ss', 'ee') a_chr, a_min, a_max, a_strand = a b_chr, b_min, b_max, b_strand = b # must be on the same chromosome if a_chr != b_chr: dist = -1 #elif range_overlap(a[:3], b[:3]): # dist = 0 else: # If the two ranges do not overlap, check stranded-ness and distance if a_min > b_min: a_min, b_min = b_min, a_min a_max, b_max = b_max, a_max a_strand, b_strand = b_strand, a_strand if distmode == "ss": dist = b_max - a_min + 1 elif distmode == "ee": dist = b_min - a_max - 1 orientation = a_strand + b_strand return dist, orientation
python
def range_distance(a, b, distmode='ss'): """ Returns the distance between two ranges. distmode is ss, se, es, ee and sets the place on read one and two to measure the distance (s = start, e = end) >>> range_distance(("1", 30, 45, '+'), ("1", 45, 55, '+')) (26, '++') >>> range_distance(("1", 30, 45, '-'), ("1", 57, 68, '-')) (39, '--') >>> range_distance(("1", 30, 42, '-'), ("1", 45, 55, '+')) (26, '-+') >>> range_distance(("1", 30, 42, '+'), ("1", 45, 55, '-'), distmode='ee') (2, '+-') """ assert distmode in ('ss', 'ee') a_chr, a_min, a_max, a_strand = a b_chr, b_min, b_max, b_strand = b # must be on the same chromosome if a_chr != b_chr: dist = -1 #elif range_overlap(a[:3], b[:3]): # dist = 0 else: # If the two ranges do not overlap, check stranded-ness and distance if a_min > b_min: a_min, b_min = b_min, a_min a_max, b_max = b_max, a_max a_strand, b_strand = b_strand, a_strand if distmode == "ss": dist = b_max - a_min + 1 elif distmode == "ee": dist = b_min - a_max - 1 orientation = a_strand + b_strand return dist, orientation
['def', 'range_distance', '(', 'a', ',', 'b', ',', 'distmode', '=', "'ss'", ')', ':', 'assert', 'distmode', 'in', '(', "'ss'", ',', "'ee'", ')', 'a_chr', ',', 'a_min', ',', 'a_max', ',', 'a_strand', '=', 'a', 'b_chr', ',', 'b_min', ',', 'b_max', ',', 'b_strand', '=', 'b', '# must be on the same chromosome', 'if', 'a_chr', '!=', 'b_chr', ':', 'dist', '=', '-', '1', '#elif range_overlap(a[:3], b[:3]):', '# dist = 0', 'else', ':', '# If the two ranges do not overlap, check stranded-ness and distance', 'if', 'a_min', '>', 'b_min', ':', 'a_min', ',', 'b_min', '=', 'b_min', ',', 'a_min', 'a_max', ',', 'b_max', '=', 'b_max', ',', 'a_max', 'a_strand', ',', 'b_strand', '=', 'b_strand', ',', 'a_strand', 'if', 'distmode', '==', '"ss"', ':', 'dist', '=', 'b_max', '-', 'a_min', '+', '1', 'elif', 'distmode', '==', '"ee"', ':', 'dist', '=', 'b_min', '-', 'a_max', '-', '1', 'orientation', '=', 'a_strand', '+', 'b_strand', 'return', 'dist', ',', 'orientation']
Returns the distance between two ranges. distmode is ss, se, es, ee and sets the place on read one and two to measure the distance (s = start, e = end) >>> range_distance(("1", 30, 45, '+'), ("1", 45, 55, '+')) (26, '++') >>> range_distance(("1", 30, 45, '-'), ("1", 57, 68, '-')) (39, '--') >>> range_distance(("1", 30, 42, '-'), ("1", 45, 55, '+')) (26, '-+') >>> range_distance(("1", 30, 42, '+'), ("1", 45, 55, '-'), distmode='ee') (2, '+-')
['Returns', 'the', 'distance', 'between', 'two', 'ranges', '.']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/range.py#L116-L155
8,883
google/grr
grr/server/grr_response_server/gui/api_plugins/vfs.py
ApiAff4ObjectRepresentation.InitFromAff4Object
def InitFromAff4Object(self, aff4_obj): """Initializes the current instance from an Aff4Object. Iterates the inheritance hierarchy of the given Aff4Object and adds a ApiAff4ObjectType for each class found in the hierarchy. Args: aff4_obj: An Aff4Object as source for the initialization. Returns: A reference to the current instance. """ attr_blacklist = [] # We use this to show attributes only once. self.types = [] for aff4_cls in aff4_obj.__class__.__mro__: if not hasattr(aff4_cls, "SchemaCls"): continue type_repr = ApiAff4ObjectType().InitFromAff4Object( aff4_obj, aff4_cls, attr_blacklist) if type_repr.attributes: self.types.append(type_repr) # Add all attribute names from this type representation to the # blacklist to not add them to the result again. attr_blacklist.extend([attr.name for attr in type_repr.attributes]) return self
python
def InitFromAff4Object(self, aff4_obj): """Initializes the current instance from an Aff4Object. Iterates the inheritance hierarchy of the given Aff4Object and adds a ApiAff4ObjectType for each class found in the hierarchy. Args: aff4_obj: An Aff4Object as source for the initialization. Returns: A reference to the current instance. """ attr_blacklist = [] # We use this to show attributes only once. self.types = [] for aff4_cls in aff4_obj.__class__.__mro__: if not hasattr(aff4_cls, "SchemaCls"): continue type_repr = ApiAff4ObjectType().InitFromAff4Object( aff4_obj, aff4_cls, attr_blacklist) if type_repr.attributes: self.types.append(type_repr) # Add all attribute names from this type representation to the # blacklist to not add them to the result again. attr_blacklist.extend([attr.name for attr in type_repr.attributes]) return self
['def', 'InitFromAff4Object', '(', 'self', ',', 'aff4_obj', ')', ':', 'attr_blacklist', '=', '[', ']', '# We use this to show attributes only once.', 'self', '.', 'types', '=', '[', ']', 'for', 'aff4_cls', 'in', 'aff4_obj', '.', '__class__', '.', '__mro__', ':', 'if', 'not', 'hasattr', '(', 'aff4_cls', ',', '"SchemaCls"', ')', ':', 'continue', 'type_repr', '=', 'ApiAff4ObjectType', '(', ')', '.', 'InitFromAff4Object', '(', 'aff4_obj', ',', 'aff4_cls', ',', 'attr_blacklist', ')', 'if', 'type_repr', '.', 'attributes', ':', 'self', '.', 'types', '.', 'append', '(', 'type_repr', ')', '# Add all attribute names from this type representation to the', '# blacklist to not add them to the result again.', 'attr_blacklist', '.', 'extend', '(', '[', 'attr', '.', 'name', 'for', 'attr', 'in', 'type_repr', '.', 'attributes', ']', ')', 'return', 'self']
Initializes the current instance from an Aff4Object. Iterates the inheritance hierarchy of the given Aff4Object and adds a ApiAff4ObjectType for each class found in the hierarchy. Args: aff4_obj: An Aff4Object as source for the initialization. Returns: A reference to the current instance.
['Initializes', 'the', 'current', 'instance', 'from', 'an', 'Aff4Object', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/api_plugins/vfs.py#L182-L211
8,884
openvax/pyensembl
pyensembl/common.py
_memoize_cache_key
def _memoize_cache_key(args, kwargs): """Turn args tuple and kwargs dictionary into a hashable key. Expects that all arguments to a memoized function are either hashable or can be uniquely identified from type(arg) and repr(arg). """ cache_key_list = [] # hack to get around the unhashability of lists, # add a special case to convert them to tuples for arg in args: if type(arg) is list: cache_key_list.append(tuple(arg)) else: cache_key_list.append(arg) for (k, v) in sorted(kwargs.items()): if type(v) is list: cache_key_list.append((k, tuple(v))) else: cache_key_list.append((k, v)) return tuple(cache_key_list)
python
def _memoize_cache_key(args, kwargs): """Turn args tuple and kwargs dictionary into a hashable key. Expects that all arguments to a memoized function are either hashable or can be uniquely identified from type(arg) and repr(arg). """ cache_key_list = [] # hack to get around the unhashability of lists, # add a special case to convert them to tuples for arg in args: if type(arg) is list: cache_key_list.append(tuple(arg)) else: cache_key_list.append(arg) for (k, v) in sorted(kwargs.items()): if type(v) is list: cache_key_list.append((k, tuple(v))) else: cache_key_list.append((k, v)) return tuple(cache_key_list)
['def', '_memoize_cache_key', '(', 'args', ',', 'kwargs', ')', ':', 'cache_key_list', '=', '[', ']', '# hack to get around the unhashability of lists,', '# add a special case to convert them to tuples', 'for', 'arg', 'in', 'args', ':', 'if', 'type', '(', 'arg', ')', 'is', 'list', ':', 'cache_key_list', '.', 'append', '(', 'tuple', '(', 'arg', ')', ')', 'else', ':', 'cache_key_list', '.', 'append', '(', 'arg', ')', 'for', '(', 'k', ',', 'v', ')', 'in', 'sorted', '(', 'kwargs', '.', 'items', '(', ')', ')', ':', 'if', 'type', '(', 'v', ')', 'is', 'list', ':', 'cache_key_list', '.', 'append', '(', '(', 'k', ',', 'tuple', '(', 'v', ')', ')', ')', 'else', ':', 'cache_key_list', '.', 'append', '(', '(', 'k', ',', 'v', ')', ')', 'return', 'tuple', '(', 'cache_key_list', ')']
Turn args tuple and kwargs dictionary into a hashable key. Expects that all arguments to a memoized function are either hashable or can be uniquely identified from type(arg) and repr(arg).
['Turn', 'args', 'tuple', 'and', 'kwargs', 'dictionary', 'into', 'a', 'hashable', 'key', '.']
train
https://github.com/openvax/pyensembl/blob/4b995fb72e848206d6fbf11950cf30964cd9b3aa/pyensembl/common.py#L31-L51
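A tiny example of _memoize_cache_key from the record above, showing how list arguments become tuples and keyword arguments are appended in sorted order so the result is hashable.

    # Assumes _memoize_cache_key from the record above is in scope.
    key = _memoize_cache_key(([1, 2], "x"), {"b": [3], "a": 4})
    print(key)        # ((1, 2), 'x', ('a', 4), ('b', (3,)))
    print(hash(key))  # hashable, so usable as a dictionary key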
8,885
deshima-dev/decode
decode/joke/functions.py
youtube
def youtube(keyword=None): """Open youtube. Args: keyword (optional): Search word. """ if keyword is None: web.open('https://www.youtube.com/watch?v=L_mBVT2jBFw') else: web.open(quote('https://www.youtube.com/results?search_query={}'.format(keyword), RESERVED))
python
def youtube(keyword=None): """Open youtube. Args: keyword (optional): Search word. """ if keyword is None: web.open('https://www.youtube.com/watch?v=L_mBVT2jBFw') else: web.open(quote('https://www.youtube.com/results?search_query={}'.format(keyword), RESERVED))
['def', 'youtube', '(', 'keyword', '=', 'None', ')', ':', 'if', 'keyword', 'is', 'None', ':', 'web', '.', 'open', '(', "'https://www.youtube.com/watch?v=L_mBVT2jBFw'", ')', 'else', ':', 'web', '.', 'open', '(', 'quote', '(', "'https://www.youtube.com/results?search_query={}'", '.', 'format', '(', 'keyword', ')', ',', 'RESERVED', ')', ')']
Open youtube. Args: keyword (optional): Search word.
['Open', 'youtube', '.']
train
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/joke/functions.py#L21-L30
8,886
NuGrid/NuGridPy
nugridpy/mesa.py
history_data.xche4_teff
def xche4_teff(self,ifig=None,lims=[1.,0.,3.4,4.7],label=None,colour=None, s2ms=True,dashes=None): """ Plot effective temperature against central helium abundance. Parameters ---------- ifig : integer or string Figure label, if None the current figure is used The default value is None. lims : list [x_lower, x_upper, y_lower, y_upper] label : string Label for the model The default value is None colour : string The colour of the line The default value is None s2ms : boolean, optional "Skip to Main Sequence" The default is True dashes : list, optional Custom dashing style. If None, ignore. The default is None. """ fsize=18 params = {'axes.labelsize': fsize, # 'font.family': 'serif', 'font.family': 'Times New Roman', 'figure.facecolor': 'white', 'text.fontsize': fsize, 'legend.fontsize': fsize, 'xtick.labelsize': fsize*0.8, 'ytick.labelsize': fsize*0.8, 'text.usetex': False} try: pl.rcParams.update(params) except: pass if s2ms: h1=self.get('center_h1') idx=np.where(h1[0]-h1>=1.e-3)[0][0] skip=idx else: skip=0 x = self.get('center_he4')[skip:] y = self.get('log_Teff')[skip:] if ifig is not None: pl.figure(ifig) if label is not None: if colour is not None: line,=pl.plot(x,y,label=label,color=colour) else: line,=pl.plot(x,y,label=label) pl.legend(loc='best').draw_frame(False) else: if colour is not None: line,=pl.plot(x,y,color=colour) else: line,=pl.plot(x,y) if dashes is not None: line.set_dashes(dashes) if label is not None: pl.legend(loc='best').draw_frame(False) pl.xlim(lims[:2]) pl.ylim(lims[2:]) pl.xlabel('$X_{\\rm c}(\,^4{\\rm He}\,)$') pl.ylabel('$\log\,T_{\\rm eff}$')
python
def xche4_teff(self,ifig=None,lims=[1.,0.,3.4,4.7],label=None,colour=None, s2ms=True,dashes=None): """ Plot effective temperature against central helium abundance. Parameters ---------- ifig : integer or string Figure label, if None the current figure is used The default value is None. lims : list [x_lower, x_upper, y_lower, y_upper] label : string Label for the model The default value is None colour : string The colour of the line The default value is None s2ms : boolean, optional "Skip to Main Sequence" The default is True dashes : list, optional Custom dashing style. If None, ignore. The default is None. """ fsize=18 params = {'axes.labelsize': fsize, # 'font.family': 'serif', 'font.family': 'Times New Roman', 'figure.facecolor': 'white', 'text.fontsize': fsize, 'legend.fontsize': fsize, 'xtick.labelsize': fsize*0.8, 'ytick.labelsize': fsize*0.8, 'text.usetex': False} try: pl.rcParams.update(params) except: pass if s2ms: h1=self.get('center_h1') idx=np.where(h1[0]-h1>=1.e-3)[0][0] skip=idx else: skip=0 x = self.get('center_he4')[skip:] y = self.get('log_Teff')[skip:] if ifig is not None: pl.figure(ifig) if label is not None: if colour is not None: line,=pl.plot(x,y,label=label,color=colour) else: line,=pl.plot(x,y,label=label) pl.legend(loc='best').draw_frame(False) else: if colour is not None: line,=pl.plot(x,y,color=colour) else: line,=pl.plot(x,y) if dashes is not None: line.set_dashes(dashes) if label is not None: pl.legend(loc='best').draw_frame(False) pl.xlim(lims[:2]) pl.ylim(lims[2:]) pl.xlabel('$X_{\\rm c}(\,^4{\\rm He}\,)$') pl.ylabel('$\log\,T_{\\rm eff}$')
['def', 'xche4_teff', '(', 'self', ',', 'ifig', '=', 'None', ',', 'lims', '=', '[', '1.', ',', '0.', ',', '3.4', ',', '4.7', ']', ',', 'label', '=', 'None', ',', 'colour', '=', 'None', ',', 's2ms', '=', 'True', ',', 'dashes', '=', 'None', ')', ':', 'fsize', '=', '18', 'params', '=', '{', "'axes.labelsize'", ':', 'fsize', ',', "# 'font.family': 'serif',", "'font.family'", ':', "'Times New Roman'", ',', "'figure.facecolor'", ':', "'white'", ',', "'text.fontsize'", ':', 'fsize', ',', "'legend.fontsize'", ':', 'fsize', ',', "'xtick.labelsize'", ':', 'fsize', '*', '0.8', ',', "'ytick.labelsize'", ':', 'fsize', '*', '0.8', ',', "'text.usetex'", ':', 'False', '}', 'try', ':', 'pl', '.', 'rcParams', '.', 'update', '(', 'params', ')', 'except', ':', 'pass', 'if', 's2ms', ':', 'h1', '=', 'self', '.', 'get', '(', "'center_h1'", ')', 'idx', '=', 'np', '.', 'where', '(', 'h1', '[', '0', ']', '-', 'h1', '>=', '1.e-3', ')', '[', '0', ']', '[', '0', ']', 'skip', '=', 'idx', 'else', ':', 'skip', '=', '0', 'x', '=', 'self', '.', 'get', '(', "'center_he4'", ')', '[', 'skip', ':', ']', 'y', '=', 'self', '.', 'get', '(', "'log_Teff'", ')', '[', 'skip', ':', ']', 'if', 'ifig', 'is', 'not', 'None', ':', 'pl', '.', 'figure', '(', 'ifig', ')', 'if', 'label', 'is', 'not', 'None', ':', 'if', 'colour', 'is', 'not', 'None', ':', 'line', ',', '=', 'pl', '.', 'plot', '(', 'x', ',', 'y', ',', 'label', '=', 'label', ',', 'color', '=', 'colour', ')', 'else', ':', 'line', ',', '=', 'pl', '.', 'plot', '(', 'x', ',', 'y', ',', 'label', '=', 'label', ')', 'pl', '.', 'legend', '(', 'loc', '=', "'best'", ')', '.', 'draw_frame', '(', 'False', ')', 'else', ':', 'if', 'colour', 'is', 'not', 'None', ':', 'line', ',', '=', 'pl', '.', 'plot', '(', 'x', ',', 'y', ',', 'color', '=', 'colour', ')', 'else', ':', 'line', ',', '=', 'pl', '.', 'plot', '(', 'x', ',', 'y', ')', 'if', 'dashes', 'is', 'not', 'None', ':', 'line', '.', 'set_dashes', '(', 'dashes', ')', 'if', 'label', 'is', 'not', 'None', ':', 'pl', '.', 'legend', '(', 'loc', '=', "'best'", ')', '.', 'draw_frame', '(', 'False', ')', 'pl', '.', 'xlim', '(', 'lims', '[', ':', '2', ']', ')', 'pl', '.', 'ylim', '(', 'lims', '[', '2', ':', ']', ')', 'pl', '.', 'xlabel', '(', "'$X_{\\\\rm c}(\\,^4{\\\\rm He}\\,)$'", ')', 'pl', '.', 'ylabel', '(', "'$\\log\\,T_{\\\\rm eff}$'", ')']
Plot effective temperature against central helium abundance. Parameters ---------- ifig : integer or string Figure label, if None the current figure is used The default value is None. lims : list [x_lower, x_upper, y_lower, y_upper] label : string Label for the model The default value is None colour : string The colour of the line The default value is None s2ms : boolean, optional "Skip to Main Sequence" The default is True dashes : list, optional Custom dashing style. If None, ignore. The default is None.
['Plot', 'effective', 'temperature', 'against', 'central', 'helium', 'abundance', '.']
train
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/mesa.py#L1110-L1184
8,887
sorgerlab/indra
indra/sources/hume/processor.py
HumeJsonLdProcessor._find_relations
def _find_relations(self): """Find all relevant relation elements and return them in a list.""" # Get all extractions extractions = \ list(self.tree.execute("$.extractions[(@.@type is 'Extraction')]")) # Get relations from extractions relations = [] for e in extractions: label_set = set(e.get('labels', [])) # If this is a DirectedRelation if 'DirectedRelation' in label_set: self.relation_dict[e['@id']] = e subtype = e.get('subtype') if any(t in subtype for t in polarities.keys()): relations.append((subtype, e)) # If this is an Event or an Entity if {'Event', 'Entity'} & label_set: self.concept_dict[e['@id']] = e if not relations and not self.relation_dict: logger.info("No relations found.") else: logger.info('%d relations of types %s found' % (len(relations), ', '.join(polarities.keys()))) logger.info('%d relations in dict.' % len(self.relation_dict)) logger.info('%d concepts found.' % len(self.concept_dict)) return relations
python
def _find_relations(self): """Find all relevant relation elements and return them in a list.""" # Get all extractions extractions = \ list(self.tree.execute("$.extractions[(@.@type is 'Extraction')]")) # Get relations from extractions relations = [] for e in extractions: label_set = set(e.get('labels', [])) # If this is a DirectedRelation if 'DirectedRelation' in label_set: self.relation_dict[e['@id']] = e subtype = e.get('subtype') if any(t in subtype for t in polarities.keys()): relations.append((subtype, e)) # If this is an Event or an Entity if {'Event', 'Entity'} & label_set: self.concept_dict[e['@id']] = e if not relations and not self.relation_dict: logger.info("No relations found.") else: logger.info('%d relations of types %s found' % (len(relations), ', '.join(polarities.keys()))) logger.info('%d relations in dict.' % len(self.relation_dict)) logger.info('%d concepts found.' % len(self.concept_dict)) return relations
['def', '_find_relations', '(', 'self', ')', ':', '# Get all extractions', 'extractions', '=', 'list', '(', 'self', '.', 'tree', '.', 'execute', '(', '"$.extractions[(@.@type is \'Extraction\')]"', ')', ')', '# Get relations from extractions', 'relations', '=', '[', ']', 'for', 'e', 'in', 'extractions', ':', 'label_set', '=', 'set', '(', 'e', '.', 'get', '(', "'labels'", ',', '[', ']', ')', ')', '# If this is a DirectedRelation', 'if', "'DirectedRelation'", 'in', 'label_set', ':', 'self', '.', 'relation_dict', '[', 'e', '[', "'@id'", ']', ']', '=', 'e', 'subtype', '=', 'e', '.', 'get', '(', "'subtype'", ')', 'if', 'any', '(', 't', 'in', 'subtype', 'for', 't', 'in', 'polarities', '.', 'keys', '(', ')', ')', ':', 'relations', '.', 'append', '(', '(', 'subtype', ',', 'e', ')', ')', '# If this is an Event or an Entity', 'if', '{', "'Event'", ',', "'Entity'", '}', '&', 'label_set', ':', 'self', '.', 'concept_dict', '[', 'e', '[', "'@id'", ']', ']', '=', 'e', 'if', 'not', 'relations', 'and', 'not', 'self', '.', 'relation_dict', ':', 'logger', '.', 'info', '(', '"No relations found."', ')', 'else', ':', 'logger', '.', 'info', '(', "'%d relations of types %s found'", '%', '(', 'len', '(', 'relations', ')', ',', "', '", '.', 'join', '(', 'polarities', '.', 'keys', '(', ')', ')', ')', ')', 'logger', '.', 'info', '(', "'%d relations in dict.'", '%', 'len', '(', 'self', '.', 'relation_dict', ')', ')', 'logger', '.', 'info', '(', "'%d concepts found.'", '%', 'len', '(', 'self', '.', 'concept_dict', ')', ')', 'return', 'relations']
Find all relevant relation elements and return them in a list.
['Find', 'all', 'relevant', 'relation', 'elements', 'and', 'return', 'them', 'in', 'a', 'list', '.']
train
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/hume/processor.py#L68-L95
8,888
saltstack/salt
salt/master.py
Master._pre_flight
def _pre_flight(self): ''' Run pre flight checks. If anything in this method fails then the master should not start up. ''' errors = [] critical_errors = [] try: os.chdir('/') except OSError as err: errors.append( 'Cannot change to root directory ({0})'.format(err) ) if self.opts.get('fileserver_verify_config', True): # Avoid circular import import salt.fileserver fileserver = salt.fileserver.Fileserver(self.opts) if not fileserver.servers: errors.append( 'Failed to load fileserver backends, the configured backends ' 'are: {0}'.format(', '.join(self.opts['fileserver_backend'])) ) else: # Run init() for all backends which support the function, to # double-check configuration try: fileserver.init() except salt.exceptions.FileserverConfigError as exc: critical_errors.append('{0}'.format(exc)) if not self.opts['fileserver_backend']: errors.append('No fileserver backends are configured') # Check to see if we need to create a pillar cache dir if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')): try: with salt.utils.files.set_umask(0o077): os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache')) except OSError: pass if self.opts.get('git_pillar_verify_config', True): try: git_pillars = [ x for x in self.opts.get('ext_pillar', []) if 'git' in x and not isinstance(x['git'], six.string_types) ] except TypeError: git_pillars = [] critical_errors.append( 'Invalid ext_pillar configuration. It is likely that the ' 'external pillar type was not specified for one or more ' 'external pillars.' ) if git_pillars: try: new_opts = copy.deepcopy(self.opts) import salt.pillar.git_pillar for repo in git_pillars: new_opts['ext_pillar'] = [repo] try: git_pillar = salt.utils.gitfs.GitPillar( new_opts, repo['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) except salt.exceptions.FileserverConfigError as exc: critical_errors.append(exc.strerror) finally: del new_opts if errors or critical_errors: for error in errors: log.error(error) for error in critical_errors: log.critical(error) log.critical('Master failed pre flight checks, exiting\n') sys.exit(salt.defaults.exitcodes.EX_GENERIC)
python
def _pre_flight(self): ''' Run pre flight checks. If anything in this method fails then the master should not start up. ''' errors = [] critical_errors = [] try: os.chdir('/') except OSError as err: errors.append( 'Cannot change to root directory ({0})'.format(err) ) if self.opts.get('fileserver_verify_config', True): # Avoid circular import import salt.fileserver fileserver = salt.fileserver.Fileserver(self.opts) if not fileserver.servers: errors.append( 'Failed to load fileserver backends, the configured backends ' 'are: {0}'.format(', '.join(self.opts['fileserver_backend'])) ) else: # Run init() for all backends which support the function, to # double-check configuration try: fileserver.init() except salt.exceptions.FileserverConfigError as exc: critical_errors.append('{0}'.format(exc)) if not self.opts['fileserver_backend']: errors.append('No fileserver backends are configured') # Check to see if we need to create a pillar cache dir if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')): try: with salt.utils.files.set_umask(0o077): os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache')) except OSError: pass if self.opts.get('git_pillar_verify_config', True): try: git_pillars = [ x for x in self.opts.get('ext_pillar', []) if 'git' in x and not isinstance(x['git'], six.string_types) ] except TypeError: git_pillars = [] critical_errors.append( 'Invalid ext_pillar configuration. It is likely that the ' 'external pillar type was not specified for one or more ' 'external pillars.' ) if git_pillars: try: new_opts = copy.deepcopy(self.opts) import salt.pillar.git_pillar for repo in git_pillars: new_opts['ext_pillar'] = [repo] try: git_pillar = salt.utils.gitfs.GitPillar( new_opts, repo['git'], per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES, per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY, global_only=salt.pillar.git_pillar.GLOBAL_ONLY) except salt.exceptions.FileserverConfigError as exc: critical_errors.append(exc.strerror) finally: del new_opts if errors or critical_errors: for error in errors: log.error(error) for error in critical_errors: log.critical(error) log.critical('Master failed pre flight checks, exiting\n') sys.exit(salt.defaults.exitcodes.EX_GENERIC)
['def', '_pre_flight', '(', 'self', ')', ':', 'errors', '=', '[', ']', 'critical_errors', '=', '[', ']', 'try', ':', 'os', '.', 'chdir', '(', "'/'", ')', 'except', 'OSError', 'as', 'err', ':', 'errors', '.', 'append', '(', "'Cannot change to root directory ({0})'", '.', 'format', '(', 'err', ')', ')', 'if', 'self', '.', 'opts', '.', 'get', '(', "'fileserver_verify_config'", ',', 'True', ')', ':', '# Avoid circular import', 'import', 'salt', '.', 'fileserver', 'fileserver', '=', 'salt', '.', 'fileserver', '.', 'Fileserver', '(', 'self', '.', 'opts', ')', 'if', 'not', 'fileserver', '.', 'servers', ':', 'errors', '.', 'append', '(', "'Failed to load fileserver backends, the configured backends '", "'are: {0}'", '.', 'format', '(', "', '", '.', 'join', '(', 'self', '.', 'opts', '[', "'fileserver_backend'", ']', ')', ')', ')', 'else', ':', '# Run init() for all backends which support the function, to', '# double-check configuration', 'try', ':', 'fileserver', '.', 'init', '(', ')', 'except', 'salt', '.', 'exceptions', '.', 'FileserverConfigError', 'as', 'exc', ':', 'critical_errors', '.', 'append', '(', "'{0}'", '.', 'format', '(', 'exc', ')', ')', 'if', 'not', 'self', '.', 'opts', '[', "'fileserver_backend'", ']', ':', 'errors', '.', 'append', '(', "'No fileserver backends are configured'", ')', '# Check to see if we need to create a pillar cache dir', 'if', 'self', '.', 'opts', '[', "'pillar_cache'", ']', 'and', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'opts', '[', "'cachedir'", ']', ',', "'pillar_cache'", ')', ')', ':', 'try', ':', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'set_umask', '(', '0o077', ')', ':', 'os', '.', 'mkdir', '(', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'opts', '[', "'cachedir'", ']', ',', "'pillar_cache'", ')', ')', 'except', 'OSError', ':', 'pass', 'if', 'self', '.', 'opts', '.', 'get', '(', "'git_pillar_verify_config'", ',', 'True', ')', ':', 'try', ':', 'git_pillars', '=', '[', 'x', 'for', 'x', 'in', 'self', '.', 'opts', '.', 'get', '(', "'ext_pillar'", ',', '[', ']', ')', 'if', "'git'", 'in', 'x', 'and', 'not', 'isinstance', '(', 'x', '[', "'git'", ']', ',', 'six', '.', 'string_types', ')', ']', 'except', 'TypeError', ':', 'git_pillars', '=', '[', ']', 'critical_errors', '.', 'append', '(', "'Invalid ext_pillar configuration. It is likely that the '", "'external pillar type was not specified for one or more '", "'external pillars.'", ')', 'if', 'git_pillars', ':', 'try', ':', 'new_opts', '=', 'copy', '.', 'deepcopy', '(', 'self', '.', 'opts', ')', 'import', 'salt', '.', 'pillar', '.', 'git_pillar', 'for', 'repo', 'in', 'git_pillars', ':', 'new_opts', '[', "'ext_pillar'", ']', '=', '[', 'repo', ']', 'try', ':', 'git_pillar', '=', 'salt', '.', 'utils', '.', 'gitfs', '.', 'GitPillar', '(', 'new_opts', ',', 'repo', '[', "'git'", ']', ',', 'per_remote_overrides', '=', 'salt', '.', 'pillar', '.', 'git_pillar', '.', 'PER_REMOTE_OVERRIDES', ',', 'per_remote_only', '=', 'salt', '.', 'pillar', '.', 'git_pillar', '.', 'PER_REMOTE_ONLY', ',', 'global_only', '=', 'salt', '.', 'pillar', '.', 'git_pillar', '.', 'GLOBAL_ONLY', ')', 'except', 'salt', '.', 'exceptions', '.', 'FileserverConfigError', 'as', 'exc', ':', 'critical_errors', '.', 'append', '(', 'exc', '.', 'strerror', ')', 'finally', ':', 'del', 'new_opts', 'if', 'errors', 'or', 'critical_errors', ':', 'for', 'error', 'in', 'errors', ':', 'log', '.', 'error', '(', 'error', ')', 'for', 'error', 'in', 'critical_errors', ':', 'log', '.', 'critical', '(', 'error', ')', 'log', '.', 'critical', '(', "'Master failed pre flight checks, exiting\\n'", ')', 'sys', '.', 'exit', '(', 'salt', '.', 'defaults', '.', 'exitcodes', '.', 'EX_GENERIC', ')']
Run pre flight checks. If anything in this method fails then the master should not start up.
['Run', 'pre', 'flight', 'checks', '.', 'If', 'anything', 'in', 'this', 'method', 'fails', 'then', 'the', 'master', 'should', 'not', 'start', 'up', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/master.py#L563-L644
8,889
perrygeo/python-rasterstats
src/rasterstats/point.py
gen_point_query
def gen_point_query( vectors, raster, band=1, layer=0, nodata=None, affine=None, interpolate='bilinear', property_name='value', geojson_out=False): """ Given a set of vector features and a raster, generate raster values at each vertex of the geometry For features with point geometry, the values will be a 1D with the index refering to the feature For features with other geometry types, it effectively creates a 2D list, such that the first index is the feature, the second is the vertex within the geometry Parameters ---------- vectors: path to an vector source or geo-like python objects raster: ndarray or path to a GDAL raster source If ndarray is passed, the `transform` kwarg is required. layer: int or string, optional If `vectors` is a path to an fiona source, specify the vector layer to use either by name or number. defaults to 0 band: int, optional If `raster` is a GDAL source, the band number to use (counting from 1). defaults to 1. nodata: float, optional If `raster` is a GDAL source, this value overrides any NODATA value specified in the file's metadata. If `None`, the file's metadata's NODATA value (if any) will be used. defaults to `None`. affine: Affine instance required only for ndarrays, otherwise it is read from src interpolate: string 'bilinear' or 'nearest' interpolation property_name: string name of property key if geojson_out geojson_out: boolean generate GeoJSON-like features (default: False) original feature geometry and properties will be retained point query values appended as additional properties. Returns ------- generator of arrays (if ``geojson_out`` is False) generator of geojson features (if ``geojson_out`` is True) """ if interpolate not in ['nearest', 'bilinear']: raise ValueError("interpolate must be nearest or bilinear") features_iter = read_features(vectors, layer) with Raster(raster, nodata=nodata, affine=affine, band=band) as rast: for feat in features_iter: geom = shape(feat['geometry']) vals = [] for x, y in geom_xys(geom): if interpolate == 'nearest': r, c = rast.index(x, y) window = ((int(r), int(r+1)), (int(c), int(c+1))) src_array = rast.read(window=window, masked=True).array val = src_array[0, 0] if val is masked: vals.append(None) else: vals.append(asscalar(val)) elif interpolate == 'bilinear': window, unitxy = point_window_unitxy(x, y, rast.affine) src_array = rast.read(window=window, masked=True).array vals.append(bilinear(src_array, *unitxy)) if len(vals) == 1: vals = vals[0] # flatten single-element lists if geojson_out: if 'properties' not in feat: feat['properties'] = {} feat['properties'][property_name] = vals yield feat else: yield vals
python
def gen_point_query( vectors, raster, band=1, layer=0, nodata=None, affine=None, interpolate='bilinear', property_name='value', geojson_out=False): """ Given a set of vector features and a raster, generate raster values at each vertex of the geometry For features with point geometry, the values will be a 1D with the index refering to the feature For features with other geometry types, it effectively creates a 2D list, such that the first index is the feature, the second is the vertex within the geometry Parameters ---------- vectors: path to an vector source or geo-like python objects raster: ndarray or path to a GDAL raster source If ndarray is passed, the `transform` kwarg is required. layer: int or string, optional If `vectors` is a path to an fiona source, specify the vector layer to use either by name or number. defaults to 0 band: int, optional If `raster` is a GDAL source, the band number to use (counting from 1). defaults to 1. nodata: float, optional If `raster` is a GDAL source, this value overrides any NODATA value specified in the file's metadata. If `None`, the file's metadata's NODATA value (if any) will be used. defaults to `None`. affine: Affine instance required only for ndarrays, otherwise it is read from src interpolate: string 'bilinear' or 'nearest' interpolation property_name: string name of property key if geojson_out geojson_out: boolean generate GeoJSON-like features (default: False) original feature geometry and properties will be retained point query values appended as additional properties. Returns ------- generator of arrays (if ``geojson_out`` is False) generator of geojson features (if ``geojson_out`` is True) """ if interpolate not in ['nearest', 'bilinear']: raise ValueError("interpolate must be nearest or bilinear") features_iter = read_features(vectors, layer) with Raster(raster, nodata=nodata, affine=affine, band=band) as rast: for feat in features_iter: geom = shape(feat['geometry']) vals = [] for x, y in geom_xys(geom): if interpolate == 'nearest': r, c = rast.index(x, y) window = ((int(r), int(r+1)), (int(c), int(c+1))) src_array = rast.read(window=window, masked=True).array val = src_array[0, 0] if val is masked: vals.append(None) else: vals.append(asscalar(val)) elif interpolate == 'bilinear': window, unitxy = point_window_unitxy(x, y, rast.affine) src_array = rast.read(window=window, masked=True).array vals.append(bilinear(src_array, *unitxy)) if len(vals) == 1: vals = vals[0] # flatten single-element lists if geojson_out: if 'properties' not in feat: feat['properties'] = {} feat['properties'][property_name] = vals yield feat else: yield vals
['def', 'gen_point_query', '(', 'vectors', ',', 'raster', ',', 'band', '=', '1', ',', 'layer', '=', '0', ',', 'nodata', '=', 'None', ',', 'affine', '=', 'None', ',', 'interpolate', '=', "'bilinear'", ',', 'property_name', '=', "'value'", ',', 'geojson_out', '=', 'False', ')', ':', 'if', 'interpolate', 'not', 'in', '[', "'nearest'", ',', "'bilinear'", ']', ':', 'raise', 'ValueError', '(', '"interpolate must be nearest or bilinear"', ')', 'features_iter', '=', 'read_features', '(', 'vectors', ',', 'layer', ')', 'with', 'Raster', '(', 'raster', ',', 'nodata', '=', 'nodata', ',', 'affine', '=', 'affine', ',', 'band', '=', 'band', ')', 'as', 'rast', ':', 'for', 'feat', 'in', 'features_iter', ':', 'geom', '=', 'shape', '(', 'feat', '[', "'geometry'", ']', ')', 'vals', '=', '[', ']', 'for', 'x', ',', 'y', 'in', 'geom_xys', '(', 'geom', ')', ':', 'if', 'interpolate', '==', "'nearest'", ':', 'r', ',', 'c', '=', 'rast', '.', 'index', '(', 'x', ',', 'y', ')', 'window', '=', '(', '(', 'int', '(', 'r', ')', ',', 'int', '(', 'r', '+', '1', ')', ')', ',', '(', 'int', '(', 'c', ')', ',', 'int', '(', 'c', '+', '1', ')', ')', ')', 'src_array', '=', 'rast', '.', 'read', '(', 'window', '=', 'window', ',', 'masked', '=', 'True', ')', '.', 'array', 'val', '=', 'src_array', '[', '0', ',', '0', ']', 'if', 'val', 'is', 'masked', ':', 'vals', '.', 'append', '(', 'None', ')', 'else', ':', 'vals', '.', 'append', '(', 'asscalar', '(', 'val', ')', ')', 'elif', 'interpolate', '==', "'bilinear'", ':', 'window', ',', 'unitxy', '=', 'point_window_unitxy', '(', 'x', ',', 'y', ',', 'rast', '.', 'affine', ')', 'src_array', '=', 'rast', '.', 'read', '(', 'window', '=', 'window', ',', 'masked', '=', 'True', ')', '.', 'array', 'vals', '.', 'append', '(', 'bilinear', '(', 'src_array', ',', '*', 'unitxy', ')', ')', 'if', 'len', '(', 'vals', ')', '==', '1', ':', 'vals', '=', 'vals', '[', '0', ']', '# flatten single-element lists', 'if', 'geojson_out', ':', 'if', "'properties'", 'not', 'in', 'feat', ':', 'feat', '[', "'properties'", ']', '=', '{', '}', 'feat', '[', "'properties'", ']', '[', 'property_name', ']', '=', 'vals', 'yield', 'feat', 'else', ':', 'yield', 'vals']
Given a set of vector features and a raster, generate raster values at each vertex of the geometry For features with point geometry, the values will be a 1D with the index refering to the feature For features with other geometry types, it effectively creates a 2D list, such that the first index is the feature, the second is the vertex within the geometry Parameters ---------- vectors: path to an vector source or geo-like python objects raster: ndarray or path to a GDAL raster source If ndarray is passed, the `transform` kwarg is required. layer: int or string, optional If `vectors` is a path to an fiona source, specify the vector layer to use either by name or number. defaults to 0 band: int, optional If `raster` is a GDAL source, the band number to use (counting from 1). defaults to 1. nodata: float, optional If `raster` is a GDAL source, this value overrides any NODATA value specified in the file's metadata. If `None`, the file's metadata's NODATA value (if any) will be used. defaults to `None`. affine: Affine instance required only for ndarrays, otherwise it is read from src interpolate: string 'bilinear' or 'nearest' interpolation property_name: string name of property key if geojson_out geojson_out: boolean generate GeoJSON-like features (default: False) original feature geometry and properties will be retained point query values appended as additional properties. Returns ------- generator of arrays (if ``geojson_out`` is False) generator of geojson features (if ``geojson_out`` is True)
['Given', 'a', 'set', 'of', 'vector', 'features', 'and', 'a', 'raster', 'generate', 'raster', 'values', 'at', 'each', 'vertex', 'of', 'the', 'geometry']
train
https://github.com/perrygeo/python-rasterstats/blob/910455cd7c9c21eadf464927db72b38ef62b7dfb/src/rasterstats/point.py#L100-L197
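A minimal usage sketch for gen_point_query, assuming a raster file named 'elevation.tif' exists and that an in-memory GeoJSON-like feature is accepted by read_features, as the docstring's "geo-like python objects" suggests; the coordinates are illustrative only.

from rasterstats.point import gen_point_query

# Illustrative inputs: a GeoJSON-like point feature and a raster path assumed to exist on disk
feature = {
    "type": "Feature",
    "properties": {},
    "geometry": {"type": "Point", "coordinates": [245300.0, 1000073.0]},
}

# gen_point_query is a generator; each item holds the interpolated value(s) for one feature,
# and a single-vertex geometry yields a single (flattened) value per the code above
for vals in gen_point_query([feature], "elevation.tif", interpolate="bilinear"):
    print(vals)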
8,890
fractalego/parvusdb
parvusdb/utils/graph_database.py
GraphDatabase.query
def query(self, string, repeat_n_times=None): """ This method performs the operations onto self.g :param string: The list of operations to perform. The sequences of commands should be separated by a semicolon An example might be CREATE {'tag': 'PERSON', 'text': 'joseph'}(v1), {'relation': 'LIVES_AT'}(v1,v2), {'tag': 'PLACE', 'text': 'London'}(v2) MATCH {}(_a), {'relation': 'LIVES_AT'}(_a,_b), {}(_b) WHERE (= (get _a "text") "joseph") RETURN _a,_b; :param repeat_n_times: The maximum number of times the graph is queried. It sets the maximum length of the return list. If None then the value is set by the function self.__determine_how_many_times_to_repeat_query(string) :return: If the RETURN command is called with a list of variables names, a list of JSON with the corresponding properties is returned. If the RETURN command is used alone, a list with the entire graph is returned. Otherwise it returns an empty list """ if not repeat_n_times: repeat_n_times = self.__determine_how_many_times_to_repeat_query(string) lines = self.__get_command_lines(string) return_list = [] for line in lines: lst = self.__query_n_times(line, repeat_n_times) if lst and lst[0]: return_list = lst return return_list
python
def query(self, string, repeat_n_times=None): """ This method performs the operations onto self.g :param string: The list of operations to perform. The sequences of commands should be separated by a semicolon An example might be CREATE {'tag': 'PERSON', 'text': 'joseph'}(v1), {'relation': 'LIVES_AT'}(v1,v2), {'tag': 'PLACE', 'text': 'London'}(v2) MATCH {}(_a), {'relation': 'LIVES_AT'}(_a,_b), {}(_b) WHERE (= (get _a "text") "joseph") RETURN _a,_b; :param repeat_n_times: The maximum number of times the graph is queried. It sets the maximum length of the return list. If None then the value is set by the function self.__determine_how_many_times_to_repeat_query(string) :return: If the RETURN command is called with a list of variables names, a list of JSON with the corresponding properties is returned. If the RETURN command is used alone, a list with the entire graph is returned. Otherwise it returns an empty list """ if not repeat_n_times: repeat_n_times = self.__determine_how_many_times_to_repeat_query(string) lines = self.__get_command_lines(string) return_list = [] for line in lines: lst = self.__query_n_times(line, repeat_n_times) if lst and lst[0]: return_list = lst return return_list
['def', 'query', '(', 'self', ',', 'string', ',', 'repeat_n_times', '=', 'None', ')', ':', 'if', 'not', 'repeat_n_times', ':', 'repeat_n_times', '=', 'self', '.', '__determine_how_many_times_to_repeat_query', '(', 'string', ')', 'lines', '=', 'self', '.', '__get_command_lines', '(', 'string', ')', 'return_list', '=', '[', ']', 'for', 'line', 'in', 'lines', ':', 'lst', '=', 'self', '.', '__query_n_times', '(', 'line', ',', 'repeat_n_times', ')', 'if', 'lst', 'and', 'lst', '[', '0', ']', ':', 'return_list', '=', 'lst', 'return', 'return_list']
This method performs the operations onto self.g :param string: The list of operations to perform. The sequences of commands should be separated by a semicolon An example might be CREATE {'tag': 'PERSON', 'text': 'joseph'}(v1), {'relation': 'LIVES_AT'}(v1,v2), {'tag': 'PLACE', 'text': 'London'}(v2) MATCH {}(_a), {'relation': 'LIVES_AT'}(_a,_b), {}(_b) WHERE (= (get _a "text") "joseph") RETURN _a,_b; :param repeat_n_times: The maximum number of times the graph is queried. It sets the maximum length of the return list. If None then the value is set by the function self.__determine_how_many_times_to_repeat_query(string) :return: If the RETURN command is called with a list of variables names, a list of JSON with the corresponding properties is returned. If the RETURN command is used alone, a list with the entire graph is returned. Otherwise it returns an empty list
['This', 'method', 'performs', 'the', 'operations', 'onto', 'self', '.', 'g']
train
https://github.com/fractalego/parvusdb/blob/d5e818d3f3c3decfd4835ef2133aa956b6d87b1d/parvusdb/utils/graph_database.py#L35-L62
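A sketch of how the query method above might be driven, reusing the command string from its own docstring; construction of the GraphDatabase instance is not shown in this record, so `db` is a stand-in for an already-built instance.

# `db` is assumed to be an existing GraphDatabase instance (construction omitted here)
command = """CREATE {'tag': 'PERSON', 'text': 'joseph'}(v1), {'relation': 'LIVES_AT'}(v1,v2), {'tag': 'PLACE', 'text': 'London'}(v2) MATCH {}(_a), {'relation': 'LIVES_AT'}(_a,_b), {}(_b) WHERE (= (get _a "text") "joseph") RETURN _a,_b;"""
results = db.query(command)  # per the docstring: a list of JSON-like dicts for _a and _b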
8,891
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
bsrchd
def bsrchd(value, ndim, array): """ Do a binary search for a key value within a double precision array, assumed to be in increasing order. Return the index of the matching array entry, or -1 if the key value is not found. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bsrchd_c.html :param value: Value to find in array. :type value: float :param ndim: Dimension of array. :type ndim: int :param array: Array to be searched. :type array: Array of floats :return: index :rtype: int """ value = ctypes.c_double(value) ndim = ctypes.c_int(ndim) array = stypes.toDoubleVector(array) return libspice.bsrchd_c(value, ndim, array)
python
def bsrchd(value, ndim, array): """ Do a binary search for a key value within a double precision array, assumed to be in increasing order. Return the index of the matching array entry, or -1 if the key value is not found. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bsrchd_c.html :param value: Value to find in array. :type value: float :param ndim: Dimension of array. :type ndim: int :param array: Array to be searched. :type array: Array of floats :return: index :rtype: int """ value = ctypes.c_double(value) ndim = ctypes.c_int(ndim) array = stypes.toDoubleVector(array) return libspice.bsrchd_c(value, ndim, array)
['def', 'bsrchd', '(', 'value', ',', 'ndim', ',', 'array', ')', ':', 'value', '=', 'ctypes', '.', 'c_double', '(', 'value', ')', 'ndim', '=', 'ctypes', '.', 'c_int', '(', 'ndim', ')', 'array', '=', 'stypes', '.', 'toDoubleVector', '(', 'array', ')', 'return', 'libspice', '.', 'bsrchd_c', '(', 'value', ',', 'ndim', ',', 'array', ')']
Do a binary search for a key value within a double precision array, assumed to be in increasing order. Return the index of the matching array entry, or -1 if the key value is not found. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bsrchd_c.html :param value: Value to find in array. :type value: float :param ndim: Dimension of array. :type ndim: int :param array: Array to be searched. :type array: Array of floats :return: index :rtype: int
['Do', 'a', 'binary', 'search', 'for', 'a', 'key', 'value', 'within', 'a', 'double', 'precision', 'array', 'assumed', 'to', 'be', 'in', 'increasing', 'order', '.', 'Return', 'the', 'index', 'of', 'the', 'matching', 'array', 'entry', 'or', '-', '1', 'if', 'the', 'key', 'value', 'is', 'not', 'found', '.']
train
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L732-L752
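A small usage sketch for bsrchd, assuming SpiceyPy is installed; the array values are made up, and the expected indices in the comments follow the 0-based CSPICE convention with -1 on a miss as the docstring states.

import spiceypy

sorted_values = [1.0, 2.0, 5.0, 7.0, 11.0]  # must already be in increasing order
idx = spiceypy.bsrchd(5.0, len(sorted_values), sorted_values)
print(idx)   # expected 2: 0-based index of the matching entry
miss = spiceypy.bsrchd(3.0, len(sorted_values), sorted_values)
print(miss)  # expected -1, since 3.0 is not present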
8,892
joke2k/faker
faker/providers/__init__.py
BaseProvider.lexify
def lexify(self, text='????', letters=string.ascii_letters): """ Replaces all question mark ('?') occurrences with a random letter. :param text: string to be parsed :param letters: a set of letters to choose from. :returns: string with all letter placeholders filled in """ return _re_qm.sub(lambda x: self.random_element(letters), text)
python
def lexify(self, text='????', letters=string.ascii_letters): """ Replaces all question mark ('?') occurrences with a random letter. :param text: string to be parsed :param letters: a set of letters to choose from. :returns: string with all letter placeholders filled in """ return _re_qm.sub(lambda x: self.random_element(letters), text)
['def', 'lexify', '(', 'self', ',', 'text', '=', "'????'", ',', 'letters', '=', 'string', '.', 'ascii_letters', ')', ':', 'return', '_re_qm', '.', 'sub', '(', 'lambda', 'x', ':', 'self', '.', 'random_element', '(', 'letters', ')', ',', 'text', ')']
Replaces all question mark ('?') occurrences with a random letter. :param text: string to be parsed :param letters: a set of letters to choose from. :returns: string with all letter placeholders filled in
['Replaces', 'all', 'question', 'mark', '(', '?', ')', 'occurrences', 'with', 'a', 'random', 'letter', '.']
train
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/__init__.py#L303-L311
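A short usage sketch for lexify via a Faker instance; output is random, so the values in the comments only show the expected shape.

from faker import Faker

fake = Faker()
print(fake.lexify(text="????"))                 # e.g. 'kQzR' - each '?' becomes a random letter
print(fake.lexify(text="ID-????"))              # e.g. 'ID-wXpA' - non-'?' characters are kept as-is
print(fake.lexify(text="????", letters="ABC"))  # e.g. 'CABA' - letters drawn only from the given set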
8,893
Jaymon/prom
prom/interface/sqlite.py
SQLite.get_field_SQL
def get_field_SQL(self, field_name, field): """ returns the SQL for a given field with full type information http://www.sqlite.org/datatype3.html field_name -- string -- the field's name field -- Field() -- the set options for the field return -- string -- the field type (eg, foo BOOL NOT NULL) """ field_type = "" is_pk = field.options.get('pk', False) if issubclass(field.type, bool): field_type = 'BOOLEAN' elif issubclass(field.type, long): if is_pk: field_type = 'INTEGER PRIMARY KEY' else: field_type = 'BIGINT' elif issubclass(field.type, int): field_type = 'INTEGER' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, basestring): fo = field.options if field.is_ref(): # TODO -- 7-8-17 - this isn't a great way to do this, ideally the Field instance # would combine all the options of both the current field and the # foreign key field and return all those when Field.options is called # (with the current field's options taking precedence) but there are # lots of circular dependency things that happen when one field is # trying to get the schema of another field and I don't have time # to sort it all out right now ref_s = field.schema fo = ref_s.pk.options if 'size' in fo: field_type = 'CHARACTER({})'.format(fo['size']) elif 'max_size' in fo: field_type = 'VARCHAR({})'.format(fo['max_size']) else: field_type = 'TEXT' if fo.get('ignore_case', False): field_type += ' COLLATE NOCASE' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, datetime.datetime): #field_type = 'DATETIME' field_type = 'TIMESTAMP' elif issubclass(field.type, datetime.date): field_type = 'DATE' elif issubclass(field.type, float): field_type = 'REAL' size = field.options.get('size', field.options.get('max_size', 0)) if size > 6: field_type = 'DOUBLE PRECISION' elif issubclass(field.type, decimal.Decimal): field_type = 'NUMERIC' elif issubclass(field.type, bytearray): field_type = 'BLOB' else: raise ValueError('unknown python type: {}'.format(field.type.__name__)) if field.required: field_type += ' NOT NULL' else: field_type += ' NULL' if not is_pk: if field.is_ref(): ref_s = field.schema if field.required: # strong ref, it deletes on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE CASCADE'.format( ref_s, ref_s.pk.name ) else: # weak ref, it sets column to null on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE SET NULL'.format( ref_s, ref_s.pk.name ) return '{} {}'.format(self._normalize_name(field_name), field_type)
python
def get_field_SQL(self, field_name, field): """ returns the SQL for a given field with full type information http://www.sqlite.org/datatype3.html field_name -- string -- the field's name field -- Field() -- the set options for the field return -- string -- the field type (eg, foo BOOL NOT NULL) """ field_type = "" is_pk = field.options.get('pk', False) if issubclass(field.type, bool): field_type = 'BOOLEAN' elif issubclass(field.type, long): if is_pk: field_type = 'INTEGER PRIMARY KEY' else: field_type = 'BIGINT' elif issubclass(field.type, int): field_type = 'INTEGER' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, basestring): fo = field.options if field.is_ref(): # TODO -- 7-8-17 - this isn't a great way to do this, ideally the Field instance # would combine all the options of both the current field and the # foreign key field and return all those when Field.options is called # (with the current field's options taking precedence) but there are # lots of circular dependency things that happen when one field is # trying to get the schema of another field and I don't have time # to sort it all out right now ref_s = field.schema fo = ref_s.pk.options if 'size' in fo: field_type = 'CHARACTER({})'.format(fo['size']) elif 'max_size' in fo: field_type = 'VARCHAR({})'.format(fo['max_size']) else: field_type = 'TEXT' if fo.get('ignore_case', False): field_type += ' COLLATE NOCASE' if is_pk: field_type += ' PRIMARY KEY' elif issubclass(field.type, datetime.datetime): #field_type = 'DATETIME' field_type = 'TIMESTAMP' elif issubclass(field.type, datetime.date): field_type = 'DATE' elif issubclass(field.type, float): field_type = 'REAL' size = field.options.get('size', field.options.get('max_size', 0)) if size > 6: field_type = 'DOUBLE PRECISION' elif issubclass(field.type, decimal.Decimal): field_type = 'NUMERIC' elif issubclass(field.type, bytearray): field_type = 'BLOB' else: raise ValueError('unknown python type: {}'.format(field.type.__name__)) if field.required: field_type += ' NOT NULL' else: field_type += ' NULL' if not is_pk: if field.is_ref(): ref_s = field.schema if field.required: # strong ref, it deletes on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE CASCADE'.format( ref_s, ref_s.pk.name ) else: # weak ref, it sets column to null on fk row removal field_type += ' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE SET NULL'.format( ref_s, ref_s.pk.name ) return '{} {}'.format(self._normalize_name(field_name), field_type)
['def', 'get_field_SQL', '(', 'self', ',', 'field_name', ',', 'field', ')', ':', 'field_type', '=', '""', 'is_pk', '=', 'field', '.', 'options', '.', 'get', '(', "'pk'", ',', 'False', ')', 'if', 'issubclass', '(', 'field', '.', 'type', ',', 'bool', ')', ':', 'field_type', '=', "'BOOLEAN'", 'elif', 'issubclass', '(', 'field', '.', 'type', ',', 'long', ')', ':', 'if', 'is_pk', ':', 'field_type', '=', "'INTEGER PRIMARY KEY'", 'else', ':', 'field_type', '=', "'BIGINT'", 'elif', 'issubclass', '(', 'field', '.', 'type', ',', 'int', ')', ':', 'field_type', '=', "'INTEGER'", 'if', 'is_pk', ':', 'field_type', '+=', "' PRIMARY KEY'", 'elif', 'issubclass', '(', 'field', '.', 'type', ',', 'basestring', ')', ':', 'fo', '=', 'field', '.', 'options', 'if', 'field', '.', 'is_ref', '(', ')', ':', "# TODO -- 7-8-17 - this isn't a great way to do this, ideally the Field instance", '# would combine all the options of both the current field and the', '# foreign key field and return all those when Field.options is called', "# (with the current field's options taking precedence) but there are", '# lots of circular dependency things that happen when one field is', "# trying to get the schema of another field and I don't have time", '# to sort it all out right now', 'ref_s', '=', 'field', '.', 'schema', 'fo', '=', 'ref_s', '.', 'pk', '.', 'options', 'if', "'size'", 'in', 'fo', ':', 'field_type', '=', "'CHARACTER({})'", '.', 'format', '(', 'fo', '[', "'size'", ']', ')', 'elif', "'max_size'", 'in', 'fo', ':', 'field_type', '=', "'VARCHAR({})'", '.', 'format', '(', 'fo', '[', "'max_size'", ']', ')', 'else', ':', 'field_type', '=', "'TEXT'", 'if', 'fo', '.', 'get', '(', "'ignore_case'", ',', 'False', ')', ':', 'field_type', '+=', "' COLLATE NOCASE'", 'if', 'is_pk', ':', 'field_type', '+=', "' PRIMARY KEY'", 'elif', 'issubclass', '(', 'field', '.', 'type', ',', 'datetime', '.', 'datetime', ')', ':', "#field_type = 'DATETIME'", 'field_type', '=', "'TIMESTAMP'", 'elif', 'issubclass', '(', 'field', '.', 'type', ',', 'datetime', '.', 'date', ')', ':', 'field_type', '=', "'DATE'", 'elif', 'issubclass', '(', 'field', '.', 'type', ',', 'float', ')', ':', 'field_type', '=', "'REAL'", 'size', '=', 'field', '.', 'options', '.', 'get', '(', "'size'", ',', 'field', '.', 'options', '.', 'get', '(', "'max_size'", ',', '0', ')', ')', 'if', 'size', '>', '6', ':', 'field_type', '=', "'DOUBLE PRECISION'", 'elif', 'issubclass', '(', 'field', '.', 'type', ',', 'decimal', '.', 'Decimal', ')', ':', 'field_type', '=', "'NUMERIC'", 'elif', 'issubclass', '(', 'field', '.', 'type', ',', 'bytearray', ')', ':', 'field_type', '=', "'BLOB'", 'else', ':', 'raise', 'ValueError', '(', "'unknown python type: {}'", '.', 'format', '(', 'field', '.', 'type', '.', '__name__', ')', ')', 'if', 'field', '.', 'required', ':', 'field_type', '+=', "' NOT NULL'", 'else', ':', 'field_type', '+=', "' NULL'", 'if', 'not', 'is_pk', ':', 'if', 'field', '.', 'is_ref', '(', ')', ':', 'ref_s', '=', 'field', '.', 'schema', 'if', 'field', '.', 'required', ':', '# strong ref, it deletes on fk row removal', 'field_type', '+=', "' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE CASCADE'", '.', 'format', '(', 'ref_s', ',', 'ref_s', '.', 'pk', '.', 'name', ')', 'else', ':', '# weak ref, it sets column to null on fk row removal', 'field_type', '+=', "' REFERENCES {} ({}) ON UPDATE CASCADE ON DELETE SET NULL'", '.', 'format', '(', 'ref_s', ',', 'ref_s', '.', 'pk', '.', 'name', ')', 'return', "'{} {}'", '.', 'format', '(', 'self', '.', '_normalize_name', '(', 'field_name', ')', ',', 'field_type', ')']
returns the SQL for a given field with full type information http://www.sqlite.org/datatype3.html field_name -- string -- the field's name field -- Field() -- the set options for the field return -- string -- the field type (eg, foo BOOL NOT NULL)
['returns', 'the', 'SQL', 'for', 'a', 'given', 'field', 'with', 'full', 'type', 'information']
train
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L266-L362
8,894
pandas-dev/pandas
pandas/core/indexes/datetimelike.py
DatetimeIndexOpsMixin._create_comparison_method
def _create_comparison_method(cls, op): """ Create a comparison method that dispatches to ``cls.values``. """ def wrapper(self, other): if isinstance(other, ABCSeries): # the arrays defer to Series for comparison ops but the indexes # don't, so we have to unwrap here. other = other._values result = op(self._data, maybe_unwrap_index(other)) return result wrapper.__doc__ = op.__doc__ wrapper.__name__ = '__{}__'.format(op.__name__) return wrapper
python
def _create_comparison_method(cls, op): """ Create a comparison method that dispatches to ``cls.values``. """ def wrapper(self, other): if isinstance(other, ABCSeries): # the arrays defer to Series for comparison ops but the indexes # don't, so we have to unwrap here. other = other._values result = op(self._data, maybe_unwrap_index(other)) return result wrapper.__doc__ = op.__doc__ wrapper.__name__ = '__{}__'.format(op.__name__) return wrapper
['def', '_create_comparison_method', '(', 'cls', ',', 'op', ')', ':', 'def', 'wrapper', '(', 'self', ',', 'other', ')', ':', 'if', 'isinstance', '(', 'other', ',', 'ABCSeries', ')', ':', '# the arrays defer to Series for comparison ops but the indexes', "# don't, so we have to unwrap here.", 'other', '=', 'other', '.', '_values', 'result', '=', 'op', '(', 'self', '.', '_data', ',', 'maybe_unwrap_index', '(', 'other', ')', ')', 'return', 'result', 'wrapper', '.', '__doc__', '=', 'op', '.', '__doc__', 'wrapper', '.', '__name__', '=', "'__{}__'", '.', 'format', '(', 'op', '.', '__name__', ')', 'return', 'wrapper']
Create a comparison method that dispatches to ``cls.values``.
['Create', 'a', 'comparison', 'method', 'that', 'dispatches', 'to', 'cls', '.', 'values', '.']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L107-L122
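The classmethod above is a factory: it wraps a raw operator so the generated dunder method unwraps Series operands before dispatching to the underlying array. A stripped-down illustration of the same pattern, independent of pandas internals; the Box class and its value attribute are made up for the example.

import operator


def make_comparison_method(op):
    # Build a dunder-style wrapper around a binary operator, copying its metadata,
    # mirroring how the pandas factory sets __name__ and __doc__ on the wrapper
    def wrapper(self, other):
        if isinstance(other, Box):  # "unwrap" wrapped operands, as the pandas code does for Series
            other = other.value
        return op(self.value, other)

    wrapper.__doc__ = op.__doc__
    wrapper.__name__ = '__{}__'.format(op.__name__)
    return wrapper


class Box:
    def __init__(self, value):
        self.value = value

    __eq__ = make_comparison_method(operator.eq)
    __lt__ = make_comparison_method(operator.lt)


print(Box(3) == Box(3))  # True
print(Box(2) < 5)        # True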
8,895
kigawas/eciespy
ecies/__init__.py
encrypt
def encrypt(receiver_pubhex: str, msg: bytes) -> bytes: """ Encrypt with eth public key Parameters ---------- receiver_pubhex: str Receiver's ethereum public key hex string msg: bytes Data to encrypt Returns ------- bytes Encrypted data """ disposable_key = generate_key() receiver_pubkey = hex2pub(receiver_pubhex) aes_key = derive(disposable_key, receiver_pubkey) cipher_text = aes_encrypt(aes_key, msg) return disposable_key.public_key.format(False) + cipher_text
python
def encrypt(receiver_pubhex: str, msg: bytes) -> bytes: """ Encrypt with eth public key Parameters ---------- receiver_pubhex: str Receiver's ethereum public key hex string msg: bytes Data to encrypt Returns ------- bytes Encrypted data """ disposable_key = generate_key() receiver_pubkey = hex2pub(receiver_pubhex) aes_key = derive(disposable_key, receiver_pubkey) cipher_text = aes_encrypt(aes_key, msg) return disposable_key.public_key.format(False) + cipher_text
['def', 'encrypt', '(', 'receiver_pubhex', ':', 'str', ',', 'msg', ':', 'bytes', ')', '->', 'bytes', ':', 'disposable_key', '=', 'generate_key', '(', ')', 'receiver_pubkey', '=', 'hex2pub', '(', 'receiver_pubhex', ')', 'aes_key', '=', 'derive', '(', 'disposable_key', ',', 'receiver_pubkey', ')', 'cipher_text', '=', 'aes_encrypt', '(', 'aes_key', ',', 'msg', ')', 'return', 'disposable_key', '.', 'public_key', '.', 'format', '(', 'False', ')', '+', 'cipher_text']
Encrypt with eth public key Parameters ---------- receiver_pubhex: str Receiver's ethereum public key hex string msg: bytes Data to encrypt Returns ------- bytes Encrypted data
['Encrypt', 'with', 'eth', 'public', 'key']
train
https://github.com/kigawas/eciespy/blob/233f3d7726bf03465a6b2470e83f34cc457eea6c/ecies/__init__.py#L6-L26
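A round-trip sketch for the encrypt function above, following the usage shown in the eciespy README as far as memory serves; treat the helper names as assumptions to verify against the project docs.

from ecies import encrypt, decrypt
from ecies.utils import generate_eth_key

# generate_eth_key and the hex accessors follow the project's documented usage;
# verify the exact helper names against the eciespy README before relying on them
key = generate_eth_key()
private_hex = key.to_hex()
public_hex = key.public_key.to_hex()

ciphertext = encrypt(public_hex, b"hello world")
assert decrypt(private_hex, ciphertext) == b"hello world"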
8,896
fabioz/PyDev.Debugger
third_party/pep8/autopep8.py
ReformattedLines._split_after_delimiter
def _split_after_delimiter(self, item, indent_amt): """Split the line only after a delimiter.""" self._delete_whitespace() if self.fits_on_current_line(item.size): return last_space = None for item in reversed(self._lines): if ( last_space and (not isinstance(item, Atom) or not item.is_colon) ): break else: last_space = None if isinstance(item, self._Space): last_space = item if isinstance(item, (self._LineBreak, self._Indent)): return if not last_space: return self.add_line_break_at(self._lines.index(last_space), indent_amt)
python
def _split_after_delimiter(self, item, indent_amt): """Split the line only after a delimiter.""" self._delete_whitespace() if self.fits_on_current_line(item.size): return last_space = None for item in reversed(self._lines): if ( last_space and (not isinstance(item, Atom) or not item.is_colon) ): break else: last_space = None if isinstance(item, self._Space): last_space = item if isinstance(item, (self._LineBreak, self._Indent)): return if not last_space: return self.add_line_break_at(self._lines.index(last_space), indent_amt)
['def', '_split_after_delimiter', '(', 'self', ',', 'item', ',', 'indent_amt', ')', ':', 'self', '.', '_delete_whitespace', '(', ')', 'if', 'self', '.', 'fits_on_current_line', '(', 'item', '.', 'size', ')', ':', 'return', 'last_space', '=', 'None', 'for', 'item', 'in', 'reversed', '(', 'self', '.', '_lines', ')', ':', 'if', '(', 'last_space', 'and', '(', 'not', 'isinstance', '(', 'item', ',', 'Atom', ')', 'or', 'not', 'item', '.', 'is_colon', ')', ')', ':', 'break', 'else', ':', 'last_space', '=', 'None', 'if', 'isinstance', '(', 'item', ',', 'self', '.', '_Space', ')', ':', 'last_space', '=', 'item', 'if', 'isinstance', '(', 'item', ',', '(', 'self', '.', '_LineBreak', ',', 'self', '.', '_Indent', ')', ')', ':', 'return', 'if', 'not', 'last_space', ':', 'return', 'self', '.', 'add_line_break_at', '(', 'self', '.', '_lines', '.', 'index', '(', 'last_space', ')', ',', 'indent_amt', ')']
Split the line only after a delimiter.
['Split', 'the', 'line', 'only', 'after', 'a', 'delimiter', '.']
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/autopep8.py#L1897-L1921
8,897
PyGithub/PyGithub
github/Migration.py
Migration.get_archive_url
def get_archive_url(self): """ :calls: `GET /user/migrations/:migration_id/archive`_ :rtype: str """ headers, data = self._requester.requestJsonAndCheck( "GET", self.url + "/archive", headers={ "Accept": Consts.mediaTypeMigrationPreview } ) return data["data"]
python
def get_archive_url(self): """ :calls: `GET /user/migrations/:migration_id/archive`_ :rtype: str """ headers, data = self._requester.requestJsonAndCheck( "GET", self.url + "/archive", headers={ "Accept": Consts.mediaTypeMigrationPreview } ) return data["data"]
['def', 'get_archive_url', '(', 'self', ')', ':', 'headers', ',', 'data', '=', 'self', '.', '_requester', '.', 'requestJsonAndCheck', '(', '"GET"', ',', 'self', '.', 'url', '+', '"/archive"', ',', 'headers', '=', '{', '"Accept"', ':', 'Consts', '.', 'mediaTypeMigrationPreview', '}', ')', 'return', 'data', '[', '"data"', ']']
:calls: `GET /user/migrations/:migration_id/archive`_ :rtype: str
[':', 'calls', ':', 'GET', '/', 'user', '/', 'migrations', '/', ':', 'migration_id', '/', 'archive', '_', ':', 'rtype', ':', 'str']
train
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Migration.py#L146-L158
8,898
senaite/senaite.core
bika/lims/content/abstractbaseanalysis.py
AbstractBaseAnalysis.getTotalPrice
def getTotalPrice(self): """Compute total price including VAT """ price = self.getPrice() vat = self.getVAT() price = price and price or 0 vat = vat and vat or 0 return float(price) + (float(price) * float(vat)) / 100
python
def getTotalPrice(self): """Compute total price including VAT """ price = self.getPrice() vat = self.getVAT() price = price and price or 0 vat = vat and vat or 0 return float(price) + (float(price) * float(vat)) / 100
['def', 'getTotalPrice', '(', 'self', ')', ':', 'price', '=', 'self', '.', 'getPrice', '(', ')', 'vat', '=', 'self', '.', 'getVAT', '(', ')', 'price', '=', 'price', 'and', 'price', 'or', '0', 'vat', '=', 'vat', 'and', 'vat', 'or', '0', 'return', 'float', '(', 'price', ')', '+', '(', 'float', '(', 'price', ')', '*', 'float', '(', 'vat', ')', ')', '/', '100']
Compute total price including VAT
['Compute', 'total', 'price', 'including', 'VAT']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/abstractbaseanalysis.py#L785-L792
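The arithmetic above amounts to price * (1 + vat/100): a price of 100.00 with a VAT rate of 14 gives 100 + 100 * 14 / 100 = 114.00. A standalone sketch of the same formula; the sample values are made up.

def total_price(price, vat):
    # Mirror of the formula used above: net price plus the VAT percentage of that price
    price = float(price or 0)
    vat = float(vat or 0)
    return price + (price * vat) / 100


print(total_price(100, 14))   # 114.0
print(total_price(80, 7.5))   # 86.0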
8,899
mitsei/dlkit
dlkit/handcar/repository/managers.py
RepositoryManager.get_repository_admin_session
def get_repository_admin_session(self): """Gets the repository administrative session for creating, updating and deleteing repositories. return: (osid.repository.RepositoryAdminSession) - a RepositoryAdminSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_repository_admin() is false compliance: optional - This method must be implemented if supports_repository_admin() is true. """ if not self.supports_repository_admin(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() try: session = sessions.RepositoryAdminSession(proxy=self._proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
python
def get_repository_admin_session(self): """Gets the repository administrative session for creating, updating and deleteing repositories. return: (osid.repository.RepositoryAdminSession) - a RepositoryAdminSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_repository_admin() is false compliance: optional - This method must be implemented if supports_repository_admin() is true. """ if not self.supports_repository_admin(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() try: session = sessions.RepositoryAdminSession(proxy=self._proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
['def', 'get_repository_admin_session', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'supports_repository_admin', '(', ')', ':', 'raise', 'Unimplemented', '(', ')', 'try', ':', 'from', '.', 'import', 'sessions', 'except', 'ImportError', ':', 'raise', '# OperationFailed()', 'try', ':', 'session', '=', 'sessions', '.', 'RepositoryAdminSession', '(', 'proxy', '=', 'self', '.', '_proxy', ',', 'runtime', '=', 'self', '.', '_runtime', ')', 'except', 'AttributeError', ':', 'raise', '# OperationFailed()', 'return', 'session']
Gets the repository administrative session for creating, updating and deleteing repositories. return: (osid.repository.RepositoryAdminSession) - a RepositoryAdminSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_repository_admin() is false compliance: optional - This method must be implemented if supports_repository_admin() is true.
['Gets', 'the', 'repository', 'administrative', 'session', 'for', 'creating', 'updating', 'and', 'deleteing', 'repositories', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L1728-L1751