text (string, 89-104k chars) | code_tokens (list) | avg_line_len (float64, 7.91-980) | score (float64, 0-630)
---|---|---|---|
def month_indices(months):
"""Convert string labels for months to integer indices.
Parameters
----------
months : str, int
If int, number of the desired month, where January=1, February=2,
etc. If str, must match either 'ann' or some subset of
'jfmamjjasond'. If 'ann', use all months. Otherwise, use the
specified months.
Returns
-------
np.ndarray of integers corresponding to desired month indices
Raises
------
TypeError : If `months` is not an int or str
See also
--------
_month_conditional
"""
if not isinstance(months, (int, str)):
raise TypeError("`months` must be of type int or str: "
"type(months) == {}".format(type(months)))
if isinstance(months, int):
return [months]
if months.lower() == 'ann':
return np.arange(1, 13)
first_letter = 'jfmamjjasond' * 2
# Python indexing starts at 0; month indices start at 1 for January.
count = first_letter.count(months)
if (count == 0) or (count > 2):
message = ("The user must provide a unique pattern of consecutive "
"first letters of months within '{}'. The provided "
"string '{}' does not comply."
" For individual months use integers."
"".format(first_letter, months))
raise ValueError(message)
st_ind = first_letter.find(months.lower())
return np.arange(st_ind, st_ind + len(months)) % 12 + 1 | [
"def",
"month_indices",
"(",
"months",
")",
":",
"if",
"not",
"isinstance",
"(",
"months",
",",
"(",
"int",
",",
"str",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"`months` must be of type int or str: \"",
"\"type(months) == {}\"",
".",
"format",
"(",
"type",
"(",
"months",
")",
")",
")",
"if",
"isinstance",
"(",
"months",
",",
"int",
")",
":",
"return",
"[",
"months",
"]",
"if",
"months",
".",
"lower",
"(",
")",
"==",
"'ann'",
":",
"return",
"np",
".",
"arange",
"(",
"1",
",",
"13",
")",
"first_letter",
"=",
"'jfmamjjasond'",
"*",
"2",
"# Python indexing starts at 0; month indices start at 1 for January.",
"count",
"=",
"first_letter",
".",
"count",
"(",
"months",
")",
"if",
"(",
"count",
"==",
"0",
")",
"or",
"(",
"count",
">",
"2",
")",
":",
"message",
"=",
"(",
"\"The user must provide a unique pattern of consecutive \"",
"\"first letters of months within '{}'. The provided \"",
"\"string '{}' does not comply.\"",
"\" For individual months use integers.\"",
"\"\"",
".",
"format",
"(",
"first_letter",
",",
"months",
")",
")",
"raise",
"ValueError",
"(",
"message",
")",
"st_ind",
"=",
"first_letter",
".",
"find",
"(",
"months",
".",
"lower",
"(",
")",
")",
"return",
"np",
".",
"arange",
"(",
"st_ind",
",",
"st_ind",
"+",
"len",
"(",
"months",
")",
")",
"%",
"12",
"+",
"1"
]
| 35.452381 | 20.119048 |
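A brief usage sketch of `month_indices` (my own illustration, not part of the dataset row); it assumes `numpy` is imported as `np`, which the function body already requires.

```python
import numpy as np

# 'jja' matches June-July-August inside the doubled 'jfmamjjasond' string
print(month_indices('jja'))   # array([6, 7, 8])
# 'ndj' wraps across the year boundary thanks to the doubled string
print(month_indices('ndj'))   # array([11, 12, 1])
# an integer month is passed through as a one-element list
print(month_indices(3))       # [3]
# 'ann' selects every month
print(month_indices('ann'))   # array([ 1,  2, ..., 12])
```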
def create_load_balancer(self, name, zones, listeners, subnets=None,
security_groups=None):
"""
Create a new load balancer for your account. By default the load
balancer will be created in EC2. To create a load balancer inside a
VPC, parameter zones must be set to None and subnets must not be None.
The load balancer will be automatically created under the VPC that
contains the subnet(s) specified.
:type name: string
:param name: The mnemonic name associated with the new load balancer
:type zones: List of strings
:param zones: The names of the availability zone(s) to add.
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber,
Protocol, [SSLCertificateId])
where LoadBalancerPortNumber and InstancePortNumber
are integer values between 1 and 65535, Protocol is a
string containing either 'TCP', 'HTTP' or 'HTTPS';
                          SSLCertificateId is the ARN of an AWS IAM certificate,
and must be specified when doing HTTPS.
:rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
:return: The newly created :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
params = {'LoadBalancerName' : name}
for index, listener in enumerate(listeners):
i = index + 1
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if listener[2]=='HTTPS':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
if zones:
self.build_list_params(params, zones, 'AvailabilityZones.member.%d')
if subnets:
self.build_list_params(params, subnets, 'Subnets.member.%d')
if security_groups:
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
load_balancer = self.get_object('CreateLoadBalancer',
params, LoadBalancer)
load_balancer.name = name
load_balancer.listeners = listeners
load_balancer.availability_zones = zones
load_balancer.subnets = subnets
load_balancer.security_groups = security_groups
return load_balancer | [
"def",
"create_load_balancer",
"(",
"self",
",",
"name",
",",
"zones",
",",
"listeners",
",",
"subnets",
"=",
"None",
",",
"security_groups",
"=",
"None",
")",
":",
"params",
"=",
"{",
"'LoadBalancerName'",
":",
"name",
"}",
"for",
"index",
",",
"listener",
"in",
"enumerate",
"(",
"listeners",
")",
":",
"i",
"=",
"index",
"+",
"1",
"params",
"[",
"'Listeners.member.%d.LoadBalancerPort'",
"%",
"i",
"]",
"=",
"listener",
"[",
"0",
"]",
"params",
"[",
"'Listeners.member.%d.InstancePort'",
"%",
"i",
"]",
"=",
"listener",
"[",
"1",
"]",
"params",
"[",
"'Listeners.member.%d.Protocol'",
"%",
"i",
"]",
"=",
"listener",
"[",
"2",
"]",
"if",
"listener",
"[",
"2",
"]",
"==",
"'HTTPS'",
":",
"params",
"[",
"'Listeners.member.%d.SSLCertificateId'",
"%",
"i",
"]",
"=",
"listener",
"[",
"3",
"]",
"if",
"zones",
":",
"self",
".",
"build_list_params",
"(",
"params",
",",
"zones",
",",
"'AvailabilityZones.member.%d'",
")",
"if",
"subnets",
":",
"self",
".",
"build_list_params",
"(",
"params",
",",
"subnets",
",",
"'Subnets.member.%d'",
")",
"if",
"security_groups",
":",
"self",
".",
"build_list_params",
"(",
"params",
",",
"security_groups",
",",
"'SecurityGroups.member.%d'",
")",
"load_balancer",
"=",
"self",
".",
"get_object",
"(",
"'CreateLoadBalancer'",
",",
"params",
",",
"LoadBalancer",
")",
"load_balancer",
".",
"name",
"=",
"name",
"load_balancer",
".",
"listeners",
"=",
"listeners",
"load_balancer",
".",
"availability_zones",
"=",
"zones",
"load_balancer",
".",
"subnets",
"=",
"subnets",
"load_balancer",
".",
"security_groups",
"=",
"security_groups",
"return",
"load_balancer"
]
| 47.703704 | 23.777778 |
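A hedged call sketch for the method above; the connection setup, zone names and certificate ARN are illustrative placeholders, not values taken from the row. boto's `boto.connect_elb()` is assumed as the way to obtain the connection object.

```python
import boto

conn = boto.connect_elb()  # hypothetical credentials taken from the environment
listeners = [
    (80, 8080, 'HTTP'),
    (443, 8443, 'HTTPS', 'arn:aws:iam::123456789012:server-certificate/my-cert'),
]
lb = conn.create_load_balancer('my-lb', ['us-east-1a', 'us-east-1b'], listeners)
print(lb.dns_name)
# For a VPC load balancer, pass zones=None and subnets=['subnet-...'] instead,
# as the docstring above describes.
```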
def convert_serializer_field(field, is_input=True):
"""
    Converts a Django REST Framework field to a GraphQL field
and marks the field as required if we are creating an input type
and the field itself is required
"""
graphql_type = get_graphene_type_from_serializer_field(field)
args = []
kwargs = {"description": field.help_text, "required": is_input and field.required}
# if it is a tuple or a list it means that we are returning
# the graphql type and the child type
if isinstance(graphql_type, (list, tuple)):
kwargs["of_type"] = graphql_type[1]
graphql_type = graphql_type[0]
if isinstance(field, serializers.ModelSerializer):
if is_input:
graphql_type = convert_serializer_to_input_type(field.__class__)
else:
global_registry = get_global_registry()
field_model = field.Meta.model
args = [global_registry.get_type_for_model(field_model)]
elif isinstance(field, serializers.ListSerializer):
field = field.child
if is_input:
kwargs["of_type"] = convert_serializer_to_input_type(field.__class__)
else:
del kwargs["of_type"]
global_registry = get_global_registry()
field_model = field.Meta.model
args = [global_registry.get_type_for_model(field_model)]
return graphql_type(*args, **kwargs) | [
"def",
"convert_serializer_field",
"(",
"field",
",",
"is_input",
"=",
"True",
")",
":",
"graphql_type",
"=",
"get_graphene_type_from_serializer_field",
"(",
"field",
")",
"args",
"=",
"[",
"]",
"kwargs",
"=",
"{",
"\"description\"",
":",
"field",
".",
"help_text",
",",
"\"required\"",
":",
"is_input",
"and",
"field",
".",
"required",
"}",
"# if it is a tuple or a list it means that we are returning",
"# the graphql type and the child type",
"if",
"isinstance",
"(",
"graphql_type",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"kwargs",
"[",
"\"of_type\"",
"]",
"=",
"graphql_type",
"[",
"1",
"]",
"graphql_type",
"=",
"graphql_type",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"field",
",",
"serializers",
".",
"ModelSerializer",
")",
":",
"if",
"is_input",
":",
"graphql_type",
"=",
"convert_serializer_to_input_type",
"(",
"field",
".",
"__class__",
")",
"else",
":",
"global_registry",
"=",
"get_global_registry",
"(",
")",
"field_model",
"=",
"field",
".",
"Meta",
".",
"model",
"args",
"=",
"[",
"global_registry",
".",
"get_type_for_model",
"(",
"field_model",
")",
"]",
"elif",
"isinstance",
"(",
"field",
",",
"serializers",
".",
"ListSerializer",
")",
":",
"field",
"=",
"field",
".",
"child",
"if",
"is_input",
":",
"kwargs",
"[",
"\"of_type\"",
"]",
"=",
"convert_serializer_to_input_type",
"(",
"field",
".",
"__class__",
")",
"else",
":",
"del",
"kwargs",
"[",
"\"of_type\"",
"]",
"global_registry",
"=",
"get_global_registry",
"(",
")",
"field_model",
"=",
"field",
".",
"Meta",
".",
"model",
"args",
"=",
"[",
"global_registry",
".",
"get_type_for_model",
"(",
"field_model",
")",
"]",
"return",
"graphql_type",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
]
| 38.361111 | 19.472222 |
def list_submissions():
"""List the past submissions with information about them"""
submissions = []
try:
submissions = session.query(Submission).all()
except SQLAlchemyError as e:
session.rollback()
return render_template('list_submissions.html', submissions=submissions) | [
"def",
"list_submissions",
"(",
")",
":",
"submissions",
"=",
"[",
"]",
"try",
":",
"submissions",
"=",
"session",
".",
"query",
"(",
"Submission",
")",
".",
"all",
"(",
")",
"except",
"SQLAlchemyError",
"as",
"e",
":",
"session",
".",
"rollback",
"(",
")",
"return",
"render_template",
"(",
"'list_submissions.html'",
",",
"submissions",
"=",
"submissions",
")"
]
| 37.625 | 17.5 |
def plot_campaign(self, campaign=0, annotate_channels=True, **kwargs):
"""Plot all the active channels of a campaign."""
fov = getKeplerFov(campaign)
corners = fov.getCoordsOfChannelCorners()
for ch in np.arange(1, 85, dtype=int):
if ch in fov.brokenChannels:
            continue  # certain channels are no longer used
idx = np.where(corners[::, 2] == ch)
mdl = int(corners[idx, 0][0][0])
out = int(corners[idx, 1][0][0])
ra = corners[idx, 3][0]
if campaign == 1002: # Concept Engineering Test overlapped the meridian
ra[ra < 180] += 360
dec = corners[idx, 4][0]
self.ax.fill(np.concatenate((ra, ra[:1])),
np.concatenate((dec, dec[:1])), **kwargs)
if annotate_channels:
txt = "K2C{0}\n{1}.{2}\n#{3}".format(campaign, mdl, out, ch)
txt = "{1}.{2}\n#{3}".format(campaign, mdl, out, ch)
self.ax.text(np.mean(ra), np.mean(dec), txt,
ha="center", va="center",
zorder=91, fontsize=10,
color="#000000", clip_on=True) | [
"def",
"plot_campaign",
"(",
"self",
",",
"campaign",
"=",
"0",
",",
"annotate_channels",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"fov",
"=",
"getKeplerFov",
"(",
"campaign",
")",
"corners",
"=",
"fov",
".",
"getCoordsOfChannelCorners",
"(",
")",
"for",
"ch",
"in",
"np",
".",
"arange",
"(",
"1",
",",
"85",
",",
"dtype",
"=",
"int",
")",
":",
"if",
"ch",
"in",
"fov",
".",
"brokenChannels",
":",
"continue",
"# certain channel are no longer used",
"idx",
"=",
"np",
".",
"where",
"(",
"corners",
"[",
":",
":",
",",
"2",
"]",
"==",
"ch",
")",
"mdl",
"=",
"int",
"(",
"corners",
"[",
"idx",
",",
"0",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"out",
"=",
"int",
"(",
"corners",
"[",
"idx",
",",
"1",
"]",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"ra",
"=",
"corners",
"[",
"idx",
",",
"3",
"]",
"[",
"0",
"]",
"if",
"campaign",
"==",
"1002",
":",
"# Concept Engineering Test overlapped the meridian",
"ra",
"[",
"ra",
"<",
"180",
"]",
"+=",
"360",
"dec",
"=",
"corners",
"[",
"idx",
",",
"4",
"]",
"[",
"0",
"]",
"self",
".",
"ax",
".",
"fill",
"(",
"np",
".",
"concatenate",
"(",
"(",
"ra",
",",
"ra",
"[",
":",
"1",
"]",
")",
")",
",",
"np",
".",
"concatenate",
"(",
"(",
"dec",
",",
"dec",
"[",
":",
"1",
"]",
")",
")",
",",
"*",
"*",
"kwargs",
")",
"if",
"annotate_channels",
":",
"txt",
"=",
"\"K2C{0}\\n{1}.{2}\\n#{3}\"",
".",
"format",
"(",
"campaign",
",",
"mdl",
",",
"out",
",",
"ch",
")",
"txt",
"=",
"\"{1}.{2}\\n#{3}\"",
".",
"format",
"(",
"campaign",
",",
"mdl",
",",
"out",
",",
"ch",
")",
"self",
".",
"ax",
".",
"text",
"(",
"np",
".",
"mean",
"(",
"ra",
")",
",",
"np",
".",
"mean",
"(",
"dec",
")",
",",
"txt",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"center\"",
",",
"zorder",
"=",
"91",
",",
"fontsize",
"=",
"10",
",",
"color",
"=",
"\"#000000\"",
",",
"clip_on",
"=",
"True",
")"
]
| 50.333333 | 15.041667 |
def unmarkelect_comment(self, msg_data_id, index, user_comment_id):
"""
        Remove a comment from the featured ("elected") selection.
"""
return self._post(
'comment/unmarkelect',
data={
'msg_data_id': msg_data_id,
'index': index,
'user_comment_id': user_comment_id,
}) | [
"def",
"unmarkelect_comment",
"(",
"self",
",",
"msg_data_id",
",",
"index",
",",
"user_comment_id",
")",
":",
"return",
"self",
".",
"_post",
"(",
"'comment/unmarkelect'",
",",
"data",
"=",
"{",
"'msg_data_id'",
":",
"msg_data_id",
",",
"'index'",
":",
"index",
",",
"'user_comment_id'",
":",
"user_comment_id",
",",
"}",
")"
]
| 29.181818 | 13 |
def add_text_item(self, collection_uri, name, metadata, text, title=None):
"""Add a new item to a collection containing a single
text document.
The full text of the text document is specified as the text
argument and will be stored with the same name as the
item and a .txt extension.
This is a shorthand for the more general add_item method.
:param collection_uri: The URI that references the collection
:type collection_uri: String
:param name: The item name, suitable for use in a URI (no spaces)
:type name: String
:param metadata: a dictionary of metadata values describing the item
:type metadata: Dict
:param text: the full text of the document associated with this item
:type text: String
:param title: document title, defaults to the item name
:type title: String
:rtype String
:returns: the URI of the created item
:raises: APIError if the request was not successful
"""
docname = name + ".txt"
if title is None:
title = name
metadata['dcterms:identifier'] = name
metadata['@type'] = 'ausnc:AusNCObject'
metadata['hcsvlab:display_document'] = {'@id': docname}
metadata['hcsvlab:indexable_document'] = {'@id': docname}
metadata['ausnc:document'] = [{ '@id': 'document1.txt',
'@type': 'foaf:Document',
'dcterms:extent': len(text),
'dcterms:identifier': docname,
'dcterms:title': title,
'dcterms:type': 'Text'}]
meta = {'items': [{'metadata': { '@context': self.context,
'@graph': [metadata]
},
'documents': [{'content': text, 'identifier': docname}]
}]
}
response = self.api_request(collection_uri, method='POST', data=json.dumps(meta))
# this will raise an exception if the request fails
self.__check_success(response)
item_uri = collection_uri + "/" + response['success'][0]
return item_uri | [
"def",
"add_text_item",
"(",
"self",
",",
"collection_uri",
",",
"name",
",",
"metadata",
",",
"text",
",",
"title",
"=",
"None",
")",
":",
"docname",
"=",
"name",
"+",
"\".txt\"",
"if",
"title",
"is",
"None",
":",
"title",
"=",
"name",
"metadata",
"[",
"'dcterms:identifier'",
"]",
"=",
"name",
"metadata",
"[",
"'@type'",
"]",
"=",
"'ausnc:AusNCObject'",
"metadata",
"[",
"'hcsvlab:display_document'",
"]",
"=",
"{",
"'@id'",
":",
"docname",
"}",
"metadata",
"[",
"'hcsvlab:indexable_document'",
"]",
"=",
"{",
"'@id'",
":",
"docname",
"}",
"metadata",
"[",
"'ausnc:document'",
"]",
"=",
"[",
"{",
"'@id'",
":",
"'document1.txt'",
",",
"'@type'",
":",
"'foaf:Document'",
",",
"'dcterms:extent'",
":",
"len",
"(",
"text",
")",
",",
"'dcterms:identifier'",
":",
"docname",
",",
"'dcterms:title'",
":",
"title",
",",
"'dcterms:type'",
":",
"'Text'",
"}",
"]",
"meta",
"=",
"{",
"'items'",
":",
"[",
"{",
"'metadata'",
":",
"{",
"'@context'",
":",
"self",
".",
"context",
",",
"'@graph'",
":",
"[",
"metadata",
"]",
"}",
",",
"'documents'",
":",
"[",
"{",
"'content'",
":",
"text",
",",
"'identifier'",
":",
"docname",
"}",
"]",
"}",
"]",
"}",
"response",
"=",
"self",
".",
"api_request",
"(",
"collection_uri",
",",
"method",
"=",
"'POST'",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"meta",
")",
")",
"# this will raise an exception if the request fails",
"self",
".",
"__check_success",
"(",
"response",
")",
"item_uri",
"=",
"collection_uri",
"+",
"\"/\"",
"+",
"response",
"[",
"'success'",
"]",
"[",
"0",
"]",
"return",
"item_uri"
]
| 36.333333 | 25.47619 |
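A usage sketch of the shorthand above. Here `client` stands for whatever API client object this method belongs to, and the collection URI, item name and metadata are placeholders made up for illustration.

```python
# `client` is assumed to be an already-authenticated API client instance.
item_uri = client.add_text_item(
    collection_uri='https://example.org/catalog/demo-collection',
    name='item001',
    metadata={'dcterms:creator': 'Example Author'},
    text='The quick brown fox jumps over the lazy dog.',
)
print(item_uri)  # URI of the newly created item, e.g. <collection_uri>/item001
```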
def calc_new_nonce_hash(self, new_nonce, number):
"""
Calculates the new nonce hash based on the current attributes.
:param new_nonce: the new nonce to be hashed.
:param number: number to prepend before the hash.
:return: the hash for the given new nonce.
"""
new_nonce = new_nonce.to_bytes(32, 'little', signed=True)
data = new_nonce + struct.pack('<BQ', number, self.aux_hash)
# Calculates the message key from the given data
return int.from_bytes(sha1(data).digest()[4:20], 'little', signed=True) | [
"def",
"calc_new_nonce_hash",
"(",
"self",
",",
"new_nonce",
",",
"number",
")",
":",
"new_nonce",
"=",
"new_nonce",
".",
"to_bytes",
"(",
"32",
",",
"'little'",
",",
"signed",
"=",
"True",
")",
"data",
"=",
"new_nonce",
"+",
"struct",
".",
"pack",
"(",
"'<BQ'",
",",
"number",
",",
"self",
".",
"aux_hash",
")",
"# Calculates the message key from the given data",
"return",
"int",
".",
"from_bytes",
"(",
"sha1",
"(",
"data",
")",
".",
"digest",
"(",
")",
"[",
"4",
":",
"20",
"]",
",",
"'little'",
",",
"signed",
"=",
"True",
")"
]
| 43.769231 | 20.538462 |
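The hashing step above can be reproduced standalone: concatenate the 32-byte little-endian nonce with a packed (number, aux_hash) pair, SHA1 it, and read bytes 4..19 as a signed little-endian integer. The nonce and aux_hash values below are arbitrary placeholders.

```python
import struct
from hashlib import sha1

new_nonce = 1234567890            # placeholder for the 256-bit new_nonce
aux_hash = 0x0123456789ABCDEF     # placeholder for the 64-bit aux_hash
number = 1

nonce_bytes = new_nonce.to_bytes(32, 'little', signed=True)
data = nonce_bytes + struct.pack('<BQ', number, aux_hash)
# middle 16 bytes of the SHA1 digest, interpreted as a signed little-endian int
hash_value = int.from_bytes(sha1(data).digest()[4:20], 'little', signed=True)
print(hash_value)
```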
def set_layer(self, layer=None, keywords=None):
"""Set layer and update UI accordingly.
:param layer: A QgsVectorLayer.
:type layer: QgsVectorLayer
:param keywords: Keywords for the layer.
:type keywords: dict, None
"""
if self.field_mapping_widget is not None:
self.field_mapping_widget.setParent(None)
self.field_mapping_widget.close()
self.field_mapping_widget.deleteLater()
self.main_layout.removeWidget(self.field_mapping_widget)
self.field_mapping_widget = None
if layer:
self.layer = layer
else:
self.layer = self.layer_combo_box.currentLayer()
if not self.layer:
return
if keywords is not None:
self.metadata = keywords
else:
# Always read from metadata file.
try:
self.metadata = self.keyword_io.read_keywords(self.layer)
except (
NoKeywordsFoundError,
KeywordNotFoundError,
MetadataReadError) as e:
raise e
if 'inasafe_default_values' not in self.metadata:
self.metadata['inasafe_default_values'] = {}
if 'inasafe_fields' not in self.metadata:
self.metadata['inasafe_fields'] = {}
self.field_mapping_widget = FieldMappingWidget(
parent=self, iface=self.iface)
self.field_mapping_widget.set_layer(self.layer, self.metadata)
self.field_mapping_widget.show()
self.main_layout.addWidget(self.field_mapping_widget)
# Set header label
group_names = [
self.field_mapping_widget.tabText(i) for i in range(
self.field_mapping_widget.count())]
if len(group_names) == 0:
header_text = tr(
'There is no field group for this layer. Please select '
'another layer.')
self.header_label.setText(header_text)
return
elif len(group_names) == 1:
pretty_group_name = group_names[0]
elif len(group_names) == 2:
pretty_group_name = group_names[0] + tr(' and ') + group_names[1]
else:
pretty_group_name = ', '.join(group_names[:-1])
pretty_group_name += tr(', and {0}').format(group_names[-1])
header_text = tr(
'Please fill the information for every tab to determine the '
'attribute for {0} group.').format(pretty_group_name)
self.header_label.setText(header_text) | [
"def",
"set_layer",
"(",
"self",
",",
"layer",
"=",
"None",
",",
"keywords",
"=",
"None",
")",
":",
"if",
"self",
".",
"field_mapping_widget",
"is",
"not",
"None",
":",
"self",
".",
"field_mapping_widget",
".",
"setParent",
"(",
"None",
")",
"self",
".",
"field_mapping_widget",
".",
"close",
"(",
")",
"self",
".",
"field_mapping_widget",
".",
"deleteLater",
"(",
")",
"self",
".",
"main_layout",
".",
"removeWidget",
"(",
"self",
".",
"field_mapping_widget",
")",
"self",
".",
"field_mapping_widget",
"=",
"None",
"if",
"layer",
":",
"self",
".",
"layer",
"=",
"layer",
"else",
":",
"self",
".",
"layer",
"=",
"self",
".",
"layer_combo_box",
".",
"currentLayer",
"(",
")",
"if",
"not",
"self",
".",
"layer",
":",
"return",
"if",
"keywords",
"is",
"not",
"None",
":",
"self",
".",
"metadata",
"=",
"keywords",
"else",
":",
"# Always read from metadata file.",
"try",
":",
"self",
".",
"metadata",
"=",
"self",
".",
"keyword_io",
".",
"read_keywords",
"(",
"self",
".",
"layer",
")",
"except",
"(",
"NoKeywordsFoundError",
",",
"KeywordNotFoundError",
",",
"MetadataReadError",
")",
"as",
"e",
":",
"raise",
"e",
"if",
"'inasafe_default_values'",
"not",
"in",
"self",
".",
"metadata",
":",
"self",
".",
"metadata",
"[",
"'inasafe_default_values'",
"]",
"=",
"{",
"}",
"if",
"'inasafe_fields'",
"not",
"in",
"self",
".",
"metadata",
":",
"self",
".",
"metadata",
"[",
"'inasafe_fields'",
"]",
"=",
"{",
"}",
"self",
".",
"field_mapping_widget",
"=",
"FieldMappingWidget",
"(",
"parent",
"=",
"self",
",",
"iface",
"=",
"self",
".",
"iface",
")",
"self",
".",
"field_mapping_widget",
".",
"set_layer",
"(",
"self",
".",
"layer",
",",
"self",
".",
"metadata",
")",
"self",
".",
"field_mapping_widget",
".",
"show",
"(",
")",
"self",
".",
"main_layout",
".",
"addWidget",
"(",
"self",
".",
"field_mapping_widget",
")",
"# Set header label",
"group_names",
"=",
"[",
"self",
".",
"field_mapping_widget",
".",
"tabText",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"field_mapping_widget",
".",
"count",
"(",
")",
")",
"]",
"if",
"len",
"(",
"group_names",
")",
"==",
"0",
":",
"header_text",
"=",
"tr",
"(",
"'There is no field group for this layer. Please select '",
"'another layer.'",
")",
"self",
".",
"header_label",
".",
"setText",
"(",
"header_text",
")",
"return",
"elif",
"len",
"(",
"group_names",
")",
"==",
"1",
":",
"pretty_group_name",
"=",
"group_names",
"[",
"0",
"]",
"elif",
"len",
"(",
"group_names",
")",
"==",
"2",
":",
"pretty_group_name",
"=",
"group_names",
"[",
"0",
"]",
"+",
"tr",
"(",
"' and '",
")",
"+",
"group_names",
"[",
"1",
"]",
"else",
":",
"pretty_group_name",
"=",
"', '",
".",
"join",
"(",
"group_names",
"[",
":",
"-",
"1",
"]",
")",
"pretty_group_name",
"+=",
"tr",
"(",
"', and {0}'",
")",
".",
"format",
"(",
"group_names",
"[",
"-",
"1",
"]",
")",
"header_text",
"=",
"tr",
"(",
"'Please fill the information for every tab to determine the '",
"'attribute for {0} group.'",
")",
".",
"format",
"(",
"pretty_group_name",
")",
"self",
".",
"header_label",
".",
"setText",
"(",
"header_text",
")"
]
| 39.107692 | 16 |
def download_and_calibrate_parallel(list_of_ids, n=None):
"""Download and calibrate in parallel.
Parameters
----------
    list_of_ids : list
container with img_ids to process
n : int
Number of cores for the parallel processing. Default: n_cores_system//2
"""
setup_cluster(n_cores=n)
c = Client()
lbview = c.load_balanced_view()
lbview.map_async(download_and_calibrate, list_of_ids)
subprocess.Popen(["ipcluster", "stop", "--quiet"]) | [
"def",
"download_and_calibrate_parallel",
"(",
"list_of_ids",
",",
"n",
"=",
"None",
")",
":",
"setup_cluster",
"(",
"n_cores",
"=",
"n",
")",
"c",
"=",
"Client",
"(",
")",
"lbview",
"=",
"c",
".",
"load_balanced_view",
"(",
")",
"lbview",
".",
"map_async",
"(",
"download_and_calibrate",
",",
"list_of_ids",
")",
"subprocess",
".",
"Popen",
"(",
"[",
"\"ipcluster\"",
",",
"\"stop\"",
",",
"\"--quiet\"",
"]",
")"
]
| 32.466667 | 17.2 |
def to_str(self):
'''
Returns string representation of a social account. Includes the name
of the user.
'''
dflt = super(DataportenAccount, self).to_str()
return '%s (%s)' % (
self.account.extra_data.get('name', ''),
dflt,
) | [
"def",
"to_str",
"(",
"self",
")",
":",
"dflt",
"=",
"super",
"(",
"DataportenAccount",
",",
"self",
")",
".",
"to_str",
"(",
")",
"return",
"'%s (%s)'",
"%",
"(",
"self",
".",
"account",
".",
"extra_data",
".",
"get",
"(",
"'name'",
",",
"''",
")",
",",
"dflt",
",",
")"
]
| 29.5 | 22.9 |
def collect_dashboard_js(collector):
"""Generate dashboard javascript for each dashboard"""
dashmat = collector.configuration["dashmat"]
modules = collector.configuration["__active_modules__"]
compiled_static_prep = dashmat.compiled_static_prep
compiled_static_folder = dashmat.compiled_static_folder
npm_deps = list_npm_modules(collector, no_print=True)
react_server = ReactServer()
react_server.prepare(npm_deps, compiled_static_folder)
for dashboard in collector.configuration["dashboards"].values():
log.info("Generating compiled javascript for dashboard:{0}".format(dashboard.path))
filename = dashboard.path.replace("_", "__").replace("/", "_")
location = os.path.join(compiled_static_folder, "dashboards", "{0}.js".format(filename))
if os.path.exists(location):
os.remove(location)
generate_dashboard_js(dashboard, react_server, compiled_static_folder, compiled_static_prep, modules) | [
"def",
"collect_dashboard_js",
"(",
"collector",
")",
":",
"dashmat",
"=",
"collector",
".",
"configuration",
"[",
"\"dashmat\"",
"]",
"modules",
"=",
"collector",
".",
"configuration",
"[",
"\"__active_modules__\"",
"]",
"compiled_static_prep",
"=",
"dashmat",
".",
"compiled_static_prep",
"compiled_static_folder",
"=",
"dashmat",
".",
"compiled_static_folder",
"npm_deps",
"=",
"list_npm_modules",
"(",
"collector",
",",
"no_print",
"=",
"True",
")",
"react_server",
"=",
"ReactServer",
"(",
")",
"react_server",
".",
"prepare",
"(",
"npm_deps",
",",
"compiled_static_folder",
")",
"for",
"dashboard",
"in",
"collector",
".",
"configuration",
"[",
"\"dashboards\"",
"]",
".",
"values",
"(",
")",
":",
"log",
".",
"info",
"(",
"\"Generating compiled javascript for dashboard:{0}\"",
".",
"format",
"(",
"dashboard",
".",
"path",
")",
")",
"filename",
"=",
"dashboard",
".",
"path",
".",
"replace",
"(",
"\"_\"",
",",
"\"__\"",
")",
".",
"replace",
"(",
"\"/\"",
",",
"\"_\"",
")",
"location",
"=",
"os",
".",
"path",
".",
"join",
"(",
"compiled_static_folder",
",",
"\"dashboards\"",
",",
"\"{0}.js\"",
".",
"format",
"(",
"filename",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"location",
")",
":",
"os",
".",
"remove",
"(",
"location",
")",
"generate_dashboard_js",
"(",
"dashboard",
",",
"react_server",
",",
"compiled_static_folder",
",",
"compiled_static_prep",
",",
"modules",
")"
]
| 50.684211 | 25 |
def save_obj(self, path, update_normals=True):
"""Save data with OBJ format
:param stl path:
:param bool update_normals:
"""
if update_normals:
self.update_normals()
# Create triangle_list
vectors_key_list = []
vectors_list = []
normals_key_list = []
normals_list = []
triangle_list = []
for i, vector in enumerate(self.vectors):
one_triangle = []
for j in range(3):
v_key = ",".join(map(str, self.vectors[i][j][:3]))
if v_key in vectors_key_list:
v_index = vectors_key_list.index(v_key)
else:
v_index = len(vectors_key_list)
vectors_key_list.append(v_key)
vectors_list.append(self.vectors[i][j][:3])
one_triangle.append(v_index + 1)
n_key = ",".join(map(str, self.normals[i][:3]))
if n_key in normals_key_list:
n_index = normals_key_list.index(n_key)
else:
n_index = len(normals_key_list)
normals_key_list.append(n_key)
normals_list.append(self.normals[i][:3])
# print(normals_list)
triangle_list.append((one_triangle, n_index + 1))
with open(path, "wb") as fh:
print("# {} {}".format(__title__, __version__), file=fh)
print("# {}".format(datetime.datetime.now()), file=fh)
print("# {}".format(__url__), file=fh)
print("", file=fh)
for v in vectors_list:
print("v {} {} {}".format(v[0], v[1], v[2]), file=fh)
for vn in normals_list:
print("vn {} {} {}".format(vn[0], vn[1], vn[2]), file=fh)
for t in triangle_list:
faces = t[0]
normal = t[1]
print("f {}//{} {}//{} {}//{}".format(
faces[0], normal,
faces[1], normal,
faces[2], normal,
), file=fh) | [
"def",
"save_obj",
"(",
"self",
",",
"path",
",",
"update_normals",
"=",
"True",
")",
":",
"if",
"update_normals",
":",
"self",
".",
"update_normals",
"(",
")",
"# Create triangle_list",
"vectors_key_list",
"=",
"[",
"]",
"vectors_list",
"=",
"[",
"]",
"normals_key_list",
"=",
"[",
"]",
"normals_list",
"=",
"[",
"]",
"triangle_list",
"=",
"[",
"]",
"for",
"i",
",",
"vector",
"in",
"enumerate",
"(",
"self",
".",
"vectors",
")",
":",
"one_triangle",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"3",
")",
":",
"v_key",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"self",
".",
"vectors",
"[",
"i",
"]",
"[",
"j",
"]",
"[",
":",
"3",
"]",
")",
")",
"if",
"v_key",
"in",
"vectors_key_list",
":",
"v_index",
"=",
"vectors_key_list",
".",
"index",
"(",
"v_key",
")",
"else",
":",
"v_index",
"=",
"len",
"(",
"vectors_key_list",
")",
"vectors_key_list",
".",
"append",
"(",
"v_key",
")",
"vectors_list",
".",
"append",
"(",
"self",
".",
"vectors",
"[",
"i",
"]",
"[",
"j",
"]",
"[",
":",
"3",
"]",
")",
"one_triangle",
".",
"append",
"(",
"v_index",
"+",
"1",
")",
"n_key",
"=",
"\",\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"self",
".",
"normals",
"[",
"i",
"]",
"[",
":",
"3",
"]",
")",
")",
"if",
"n_key",
"in",
"normals_key_list",
":",
"n_index",
"=",
"normals_key_list",
".",
"index",
"(",
"n_key",
")",
"else",
":",
"n_index",
"=",
"len",
"(",
"normals_key_list",
")",
"normals_key_list",
".",
"append",
"(",
"n_key",
")",
"normals_list",
".",
"append",
"(",
"self",
".",
"normals",
"[",
"i",
"]",
"[",
":",
"3",
"]",
")",
"# print(normals_list)",
"triangle_list",
".",
"append",
"(",
"(",
"one_triangle",
",",
"n_index",
"+",
"1",
")",
")",
"with",
"open",
"(",
"path",
",",
"\"wb\"",
")",
"as",
"fh",
":",
"print",
"(",
"\"# {} {}\"",
".",
"format",
"(",
"__title__",
",",
"__version__",
")",
",",
"file",
"=",
"fh",
")",
"print",
"(",
"\"# {}\"",
".",
"format",
"(",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
",",
"file",
"=",
"fh",
")",
"print",
"(",
"\"# {}\"",
".",
"format",
"(",
"__url__",
")",
",",
"file",
"=",
"fh",
")",
"print",
"(",
"\"\"",
",",
"file",
"=",
"fh",
")",
"for",
"v",
"in",
"vectors_list",
":",
"print",
"(",
"\"v {} {} {}\"",
".",
"format",
"(",
"v",
"[",
"0",
"]",
",",
"v",
"[",
"1",
"]",
",",
"v",
"[",
"2",
"]",
")",
",",
"file",
"=",
"fh",
")",
"for",
"vn",
"in",
"normals_list",
":",
"print",
"(",
"\"vn {} {} {}\"",
".",
"format",
"(",
"vn",
"[",
"0",
"]",
",",
"vn",
"[",
"1",
"]",
",",
"vn",
"[",
"2",
"]",
")",
",",
"file",
"=",
"fh",
")",
"for",
"t",
"in",
"triangle_list",
":",
"faces",
"=",
"t",
"[",
"0",
"]",
"normal",
"=",
"t",
"[",
"1",
"]",
"print",
"(",
"\"f {}//{} {}//{} {}//{}\"",
".",
"format",
"(",
"faces",
"[",
"0",
"]",
",",
"normal",
",",
"faces",
"[",
"1",
"]",
",",
"normal",
",",
"faces",
"[",
"2",
"]",
",",
"normal",
",",
")",
",",
"file",
"=",
"fh",
")"
]
| 37.2 | 14.636364 |
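The core of `save_obj` is the de-duplication that turns repeated vertices and normals into 1-based OBJ indices. Below is a minimal standalone sketch of that pattern with made-up data and no mesh library; note also that, as written, the row above opens the file in 'wb' mode while printing str objects, which would fail on Python 3.

```python
vectors = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 0.0, 0.0)]  # third repeats the first

keys, unique, indices = [], [], []
for v in vectors:
    key = ",".join(map(str, v))          # string key identifies duplicates
    if key in keys:
        idx = keys.index(key)
    else:
        idx = len(keys)
        keys.append(key)
        unique.append(v)
    indices.append(idx + 1)              # OBJ indices are 1-based

print(unique)   # [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)]
print(indices)  # [1, 2, 1]
```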
def _launch(self, run_config, args, **kwargs):
"""Launch the process and return the process object."""
del kwargs
try:
with sw("popen"):
return subprocess.Popen(args, cwd=run_config.cwd, env=run_config.env)
except OSError:
logging.exception("Failed to launch")
raise SC2LaunchError("Failed to launch: %s" % args) | [
"def",
"_launch",
"(",
"self",
",",
"run_config",
",",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"del",
"kwargs",
"try",
":",
"with",
"sw",
"(",
"\"popen\"",
")",
":",
"return",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"cwd",
"=",
"run_config",
".",
"cwd",
",",
"env",
"=",
"run_config",
".",
"env",
")",
"except",
"OSError",
":",
"logging",
".",
"exception",
"(",
"\"Failed to launch\"",
")",
"raise",
"SC2LaunchError",
"(",
"\"Failed to launch: %s\"",
"%",
"args",
")"
]
| 38.444444 | 17.666667 |
def has_path(nodes, A, B):
r"""Test if nodes from a breadth_first_order search lead from A to
B.
Parameters
----------
nodes : array_like
        Nodes from breadth_first_order search
A : array_like
The set of educt states
B : array_like
The set of product states
Returns
-------
has_path : boolean
True if there exists a path, else False
"""
x1 = np.intersect1d(nodes, A).size > 0
x2 = np.intersect1d(nodes, B).size > 0
return x1 and x2 | [
"def",
"has_path",
"(",
"nodes",
",",
"A",
",",
"B",
")",
":",
"x1",
"=",
"np",
".",
"intersect1d",
"(",
"nodes",
",",
"A",
")",
".",
"size",
">",
"0",
"x2",
"=",
"np",
".",
"intersect1d",
"(",
"nodes",
",",
"B",
")",
".",
"size",
">",
"0",
"return",
"x1",
"and",
"x2"
]
| 22.636364 | 18.590909 |
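A short sketch of how `has_path` can be driven from SciPy's `breadth_first_order`, as the docstring implies; the small connectivity matrix and the sets A and B are invented for illustration.

```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import breadth_first_order

# states 0-1-2 are connected, state 3 is isolated
C = csr_matrix(np.array([[0, 1, 0, 0],
                         [1, 0, 1, 0],
                         [0, 1, 0, 0],
                         [0, 0, 0, 0]]))
nodes = breadth_first_order(C, i_start=0, directed=False,
                            return_predecessors=False)
print(has_path(nodes, A=[0], B=[2]))  # True: both sets are reached from state 0
print(has_path(nodes, A=[0], B=[3]))  # False: state 3 is unreachable
```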
def update(self, instance_id: str, details: UpdateDetails, async_allowed: bool) -> UpdateServiceSpec:
"""
Further readings `CF Broker API#Update <https://docs.cloudfoundry.org/services/api.html#updating_service_instance>`_
:param instance_id: Instance id provided by the platform
:param details: Details about the service to update
:param async_allowed: Client allows async creation
:rtype: UpdateServiceSpec
:raises ErrAsyncRequired: If async is required but not supported
"""
raise NotImplementedError() | [
"def",
"update",
"(",
"self",
",",
"instance_id",
":",
"str",
",",
"details",
":",
"UpdateDetails",
",",
"async_allowed",
":",
"bool",
")",
"->",
"UpdateServiceSpec",
":",
"raise",
"NotImplementedError",
"(",
")"
]
| 51.636364 | 26.363636 |
def connect(self):
"""Connects the object to the host:port.
Returns:
Future: a Future object with True as result if the connection
process was ok.
"""
if self.is_connected() or self.is_connecting():
raise tornado.gen.Return(True)
if self.unix_domain_socket is None:
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.tcp_nodelay:
self.__socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1)
else:
if not os.path.exists(self.unix_domain_socket):
LOG.warning("can't connect to %s, file does not exist",
self.unix_domain_socket)
raise tornado.gen.Return(False)
self.__socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.__socket.setblocking(0)
self.__periodic_callback.start()
try:
LOG.debug("connecting to %s...", self._redis_server())
self._state.set_connecting()
if self.unix_domain_socket is None:
self.__socket.connect((self.host, self.port))
else:
self.__socket.connect(self.unix_domain_socket)
except socket.error as e:
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
self.disconnect()
LOG.warning("can't connect to %s", self._redis_server())
raise tornado.gen.Return(False)
self.__socket_fileno = self.__socket.fileno()
self._register_or_update_event_handler()
yield self._state.get_changed_state_future()
if not self.is_connected():
LOG.warning("can't connect to %s", self._redis_server())
raise tornado.gen.Return(False)
else:
LOG.debug("connected to %s", self._redis_server())
self.__socket_fileno = self.__socket.fileno()
self._state.set_connected()
self._register_or_update_event_handler()
raise tornado.gen.Return(True) | [
"def",
"connect",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_connected",
"(",
")",
"or",
"self",
".",
"is_connecting",
"(",
")",
":",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"True",
")",
"if",
"self",
".",
"unix_domain_socket",
"is",
"None",
":",
"self",
".",
"__socket",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"if",
"self",
".",
"tcp_nodelay",
":",
"self",
".",
"__socket",
".",
"setsockopt",
"(",
"socket",
".",
"IPPROTO_TCP",
",",
"socket",
".",
"TCP_NODELAY",
",",
"1",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"unix_domain_socket",
")",
":",
"LOG",
".",
"warning",
"(",
"\"can't connect to %s, file does not exist\"",
",",
"self",
".",
"unix_domain_socket",
")",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"False",
")",
"self",
".",
"__socket",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_UNIX",
",",
"socket",
".",
"SOCK_STREAM",
")",
"self",
".",
"__socket",
".",
"setblocking",
"(",
"0",
")",
"self",
".",
"__periodic_callback",
".",
"start",
"(",
")",
"try",
":",
"LOG",
".",
"debug",
"(",
"\"connecting to %s...\"",
",",
"self",
".",
"_redis_server",
"(",
")",
")",
"self",
".",
"_state",
".",
"set_connecting",
"(",
")",
"if",
"self",
".",
"unix_domain_socket",
"is",
"None",
":",
"self",
".",
"__socket",
".",
"connect",
"(",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
")",
"else",
":",
"self",
".",
"__socket",
".",
"connect",
"(",
"self",
".",
"unix_domain_socket",
")",
"except",
"socket",
".",
"error",
"as",
"e",
":",
"if",
"(",
"errno_from_exception",
"(",
"e",
")",
"not",
"in",
"_ERRNO_INPROGRESS",
"and",
"errno_from_exception",
"(",
"e",
")",
"not",
"in",
"_ERRNO_WOULDBLOCK",
")",
":",
"self",
".",
"disconnect",
"(",
")",
"LOG",
".",
"warning",
"(",
"\"can't connect to %s\"",
",",
"self",
".",
"_redis_server",
"(",
")",
")",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"False",
")",
"self",
".",
"__socket_fileno",
"=",
"self",
".",
"__socket",
".",
"fileno",
"(",
")",
"self",
".",
"_register_or_update_event_handler",
"(",
")",
"yield",
"self",
".",
"_state",
".",
"get_changed_state_future",
"(",
")",
"if",
"not",
"self",
".",
"is_connected",
"(",
")",
":",
"LOG",
".",
"warning",
"(",
"\"can't connect to %s\"",
",",
"self",
".",
"_redis_server",
"(",
")",
")",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"False",
")",
"else",
":",
"LOG",
".",
"debug",
"(",
"\"connected to %s\"",
",",
"self",
".",
"_redis_server",
"(",
")",
")",
"self",
".",
"__socket_fileno",
"=",
"self",
".",
"__socket",
".",
"fileno",
"(",
")",
"self",
".",
"_state",
".",
"set_connected",
"(",
")",
"self",
".",
"_register_or_update_event_handler",
"(",
")",
"raise",
"tornado",
".",
"gen",
".",
"Return",
"(",
"True",
")"
]
| 46.297872 | 16.531915 |
def sort(args):
"""
%prog sort gffile
    Sort GFF file using plain old unix sort based on [chromosome, start coordinate],
    or topologically based on hierarchy of features using the gt (genometools) toolkit.
"""
valid_sort_methods = ("unix", "topo")
p = OptionParser(sort.__doc__)
p.add_option("--method", default="unix", choices=valid_sort_methods,
help="Specify sort method [default: %default]")
p.add_option("-i", dest="inplace", default=False, action="store_true",
help="If doing a unix sort, perform sort inplace [default: %default]")
p.set_tmpdir()
p.set_outfile()
p.set_home("gt")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gffile, = args
sortedgff = opts.outfile
if opts.inplace:
if opts.method == "topo" or (opts.method == "unix" and gffile in ("-", "stdin")):
logging.error("Cannot perform inplace sort when method is `topo`" + \
" or method is `unix` and input is `stdin` stream")
sys.exit()
if opts.method == "unix":
cmd = "sort"
cmd += " -k1,1 -k4,4n {0}".format(gffile)
if opts.tmpdir:
cmd += " -T {0}".format(opts.tmpdir)
if opts.inplace:
cmd += " -o {0}".gffile
sortedgff = None
sh(cmd, outfile=sortedgff)
elif opts.method == "topo":
GT_HOME = opts.gt_home
if not op.isdir(GT_HOME):
logging.error("GT_HOME={0} directory does not exist".format(GT_HOME))
sys.exit()
cmd = "{0}".format(op.join(GT_HOME, "bin", "gt"))
cmd += " gff3 -sort -tidy -retainids -addids no {0}".format(gffile)
sh(cmd, outfile=sortedgff) | [
"def",
"sort",
"(",
"args",
")",
":",
"valid_sort_methods",
"=",
"(",
"\"unix\"",
",",
"\"topo\"",
")",
"p",
"=",
"OptionParser",
"(",
"sort",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--method\"",
",",
"default",
"=",
"\"unix\"",
",",
"choices",
"=",
"valid_sort_methods",
",",
"help",
"=",
"\"Specify sort method [default: %default]\"",
")",
"p",
".",
"add_option",
"(",
"\"-i\"",
",",
"dest",
"=",
"\"inplace\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"If doing a unix sort, perform sort inplace [default: %default]\"",
")",
"p",
".",
"set_tmpdir",
"(",
")",
"p",
".",
"set_outfile",
"(",
")",
"p",
".",
"set_home",
"(",
"\"gt\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"gffile",
",",
"=",
"args",
"sortedgff",
"=",
"opts",
".",
"outfile",
"if",
"opts",
".",
"inplace",
":",
"if",
"opts",
".",
"method",
"==",
"\"topo\"",
"or",
"(",
"opts",
".",
"method",
"==",
"\"unix\"",
"and",
"gffile",
"in",
"(",
"\"-\"",
",",
"\"stdin\"",
")",
")",
":",
"logging",
".",
"error",
"(",
"\"Cannot perform inplace sort when method is `topo`\"",
"+",
"\" or method is `unix` and input is `stdin` stream\"",
")",
"sys",
".",
"exit",
"(",
")",
"if",
"opts",
".",
"method",
"==",
"\"unix\"",
":",
"cmd",
"=",
"\"sort\"",
"cmd",
"+=",
"\" -k1,1 -k4,4n {0}\"",
".",
"format",
"(",
"gffile",
")",
"if",
"opts",
".",
"tmpdir",
":",
"cmd",
"+=",
"\" -T {0}\"",
".",
"format",
"(",
"opts",
".",
"tmpdir",
")",
"if",
"opts",
".",
"inplace",
":",
"cmd",
"+=",
"\" -o {0}\"",
".",
"gffile",
"sortedgff",
"=",
"None",
"sh",
"(",
"cmd",
",",
"outfile",
"=",
"sortedgff",
")",
"elif",
"opts",
".",
"method",
"==",
"\"topo\"",
":",
"GT_HOME",
"=",
"opts",
".",
"gt_home",
"if",
"not",
"op",
".",
"isdir",
"(",
"GT_HOME",
")",
":",
"logging",
".",
"error",
"(",
"\"GT_HOME={0} directory does not exist\"",
".",
"format",
"(",
"GT_HOME",
")",
")",
"sys",
".",
"exit",
"(",
")",
"cmd",
"=",
"\"{0}\"",
".",
"format",
"(",
"op",
".",
"join",
"(",
"GT_HOME",
",",
"\"bin\"",
",",
"\"gt\"",
")",
")",
"cmd",
"+=",
"\" gff3 -sort -tidy -retainids -addids no {0}\"",
".",
"format",
"(",
"gffile",
")",
"sh",
"(",
"cmd",
",",
"outfile",
"=",
"sortedgff",
")"
]
| 36.595745 | 21.361702 |
def triangle_coordinates(i, j, k):
"""
Computes coordinates of the constituent triangles of a triangulation for the
    simplex. These triangles are parallel to the lower axis on the lower side.
Parameters
----------
i,j,k: enumeration of the desired triangle
Returns
-------
    A list of the three (i, j, k) coordinate triples of the triangle (unprojected)
"""
return [(i, j, k), (i + 1, j, k - 1), (i, j + 1, k - 1)] | [
"def",
"triangle_coordinates",
"(",
"i",
",",
"j",
",",
"k",
")",
":",
"return",
"[",
"(",
"i",
",",
"j",
",",
"k",
")",
",",
"(",
"i",
"+",
"1",
",",
"j",
",",
"k",
"-",
"1",
")",
",",
"(",
"i",
",",
"j",
"+",
"1",
",",
"k",
"-",
"1",
")",
"]"
]
| 28.266667 | 24.133333 |
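For concreteness, the function only offsets the (i, j, k) indices to produce one upward-pointing cell of the triangulation:

```python
print(triangle_coordinates(0, 0, 2))
# [(0, 0, 2), (1, 0, 1), (0, 1, 1)]
print(triangle_coordinates(1, 2, 3))
# [(1, 2, 3), (2, 2, 2), (1, 3, 2)]
```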
def isEnabled(self):
"""
Return whether or not this layer is enabled and can be set as the \
current layer.
:sa linkEnabledToCurrent
:return <bool>
"""
if self._linkEnabledToCurrent:
addtl = self.isCurrent()
else:
addtl = True
return self._enabled and addtl | [
"def",
"isEnabled",
"(",
"self",
")",
":",
"if",
"self",
".",
"_linkEnabledToCurrent",
":",
"addtl",
"=",
"self",
".",
"isCurrent",
"(",
")",
"else",
":",
"addtl",
"=",
"True",
"return",
"self",
".",
"_enabled",
"and",
"addtl"
]
| 23.875 | 16.875 |
async def get(self, public_key):
"""Retrieves all users contents
Accepts:
- public key
"""
# Sign-verifying functional
if settings.SIGNATURE_VERIFICATION:
super().verify()
page = self.get_query_argument("page", 1)
cids = await self.account.getuserscontent(public_key=public_key)
logging.debug("\n\n Users cids")
logging.debug(cids)
if isinstance(cids, dict):
if "error" in cids.keys():
self.set_status(cids["error"])
self.write(cids)
raise tornado.web.Finish
container = []
for coinid in cids:
logging.debug("\n [] -- coinid")
logging.debug(coinid)
#if list(cids.keys()).index(coinid) == len(cids) - 1:
# paginator = Paginator(coinid=coinid, page=page,
# limit=(settings.LIMIT//len(cids))+(settings.LIMIT%len(cids)), cids=cids)
#else:
#paginator = Paginator(coinid=coinid, page=page,
# limit=settings.LIMIT // len(cids), cids=cids)
if coinid in settings.bridges.keys():
logging.debug(" -- Coinid in ")
logging.debug(settings.bridges.keys())
self.account.blockchain.setendpoint(settings.bridges[coinid])
contents = await self.account.blockchain.getuserscontent(
cids=json.dumps(cids[coinid]))
logging.debug("\n\n -- Contents")
logging.debug(contents)
if isinstance(contents, dict):
if "error" in contents.keys():
continue
container.extend(contents)
logging.debug("\n\n -- Container 1")
logging.debug("\n\n -- Container 2")
logging.debug(container)
response = {
"profiles":json.dumps(container),
}
try:
response.update(paginator.get_pages())
except:
pass
self.write(json.dumps(response)) | [
"async",
"def",
"get",
"(",
"self",
",",
"public_key",
")",
":",
"# Sign-verifying functional",
"if",
"settings",
".",
"SIGNATURE_VERIFICATION",
":",
"super",
"(",
")",
".",
"verify",
"(",
")",
"page",
"=",
"self",
".",
"get_query_argument",
"(",
"\"page\"",
",",
"1",
")",
"cids",
"=",
"await",
"self",
".",
"account",
".",
"getuserscontent",
"(",
"public_key",
"=",
"public_key",
")",
"logging",
".",
"debug",
"(",
"\"\\n\\n Users cids\"",
")",
"logging",
".",
"debug",
"(",
"cids",
")",
"if",
"isinstance",
"(",
"cids",
",",
"dict",
")",
":",
"if",
"\"error\"",
"in",
"cids",
".",
"keys",
"(",
")",
":",
"self",
".",
"set_status",
"(",
"cids",
"[",
"\"error\"",
"]",
")",
"self",
".",
"write",
"(",
"cids",
")",
"raise",
"tornado",
".",
"web",
".",
"Finish",
"container",
"=",
"[",
"]",
"for",
"coinid",
"in",
"cids",
":",
"logging",
".",
"debug",
"(",
"\"\\n [] -- coinid\"",
")",
"logging",
".",
"debug",
"(",
"coinid",
")",
"#if list(cids.keys()).index(coinid) == len(cids) - 1:",
"#\tpaginator = Paginator(coinid=coinid, page=page, ",
"#\t\tlimit=(settings.LIMIT//len(cids))+(settings.LIMIT%len(cids)), cids=cids)",
"#else:",
"#paginator = Paginator(coinid=coinid, page=page, ",
"#\t\t\t\t\t\tlimit=settings.LIMIT // len(cids), cids=cids)",
"if",
"coinid",
"in",
"settings",
".",
"bridges",
".",
"keys",
"(",
")",
":",
"logging",
".",
"debug",
"(",
"\" -- Coinid in \"",
")",
"logging",
".",
"debug",
"(",
"settings",
".",
"bridges",
".",
"keys",
"(",
")",
")",
"self",
".",
"account",
".",
"blockchain",
".",
"setendpoint",
"(",
"settings",
".",
"bridges",
"[",
"coinid",
"]",
")",
"contents",
"=",
"await",
"self",
".",
"account",
".",
"blockchain",
".",
"getuserscontent",
"(",
"cids",
"=",
"json",
".",
"dumps",
"(",
"cids",
"[",
"coinid",
"]",
")",
")",
"logging",
".",
"debug",
"(",
"\"\\n\\n -- Contents\"",
")",
"logging",
".",
"debug",
"(",
"contents",
")",
"if",
"isinstance",
"(",
"contents",
",",
"dict",
")",
":",
"if",
"\"error\"",
"in",
"contents",
".",
"keys",
"(",
")",
":",
"continue",
"container",
".",
"extend",
"(",
"contents",
")",
"logging",
".",
"debug",
"(",
"\"\\n\\n -- Container 1\"",
")",
"logging",
".",
"debug",
"(",
"\"\\n\\n -- Container 2\"",
")",
"logging",
".",
"debug",
"(",
"container",
")",
"response",
"=",
"{",
"\"profiles\"",
":",
"json",
".",
"dumps",
"(",
"container",
")",
",",
"}",
"try",
":",
"response",
".",
"update",
"(",
"paginator",
".",
"get_pages",
"(",
")",
")",
"except",
":",
"pass",
"self",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"response",
")",
")"
]
| 24.692308 | 20.015385 |
def getFeatureReport(self, report_num=0, length=63):
"""
Receive a feature report.
Blocks, unless you configured provided file (descriptor) to be
non-blocking.
"""
length += 1
buf = bytearray(length)
buf[0] = report_num
self._ioctl(
_HIDIOCGFEATURE(length),
(ctypes.c_char * length).from_buffer(buf),
True,
)
return buf | [
"def",
"getFeatureReport",
"(",
"self",
",",
"report_num",
"=",
"0",
",",
"length",
"=",
"63",
")",
":",
"length",
"+=",
"1",
"buf",
"=",
"bytearray",
"(",
"length",
")",
"buf",
"[",
"0",
"]",
"=",
"report_num",
"self",
".",
"_ioctl",
"(",
"_HIDIOCGFEATURE",
"(",
"length",
")",
",",
"(",
"ctypes",
".",
"c_char",
"*",
"length",
")",
".",
"from_buffer",
"(",
"buf",
")",
",",
"True",
",",
")",
"return",
"buf"
]
| 28.6 | 15 |
def QA_SU_save_stock_list(engine, client=DATABASE):
"""save stock_list
Arguments:
engine {[type]} -- [description]
Keyword Arguments:
client {[type]} -- [description] (default: {DATABASE})
"""
engine = select_save_engine(engine)
engine.QA_SU_save_stock_list(client=client) | [
"def",
"QA_SU_save_stock_list",
"(",
"engine",
",",
"client",
"=",
"DATABASE",
")",
":",
"engine",
"=",
"select_save_engine",
"(",
"engine",
")",
"engine",
".",
"QA_SU_save_stock_list",
"(",
"client",
"=",
"client",
")"
]
| 25.333333 | 17.083333 |
def setup_logging(namespace):
"""
setup global logging
"""
loglevel = {
0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG,
}.get(namespace.verbosity, logging.DEBUG)
if namespace.verbosity > 1:
logformat = '%(levelname)s csvpandas %(lineno)s %(message)s'
else:
logformat = 'csvpandas %(message)s'
logging.basicConfig(stream=namespace.log, format=logformat, level=loglevel) | [
"def",
"setup_logging",
"(",
"namespace",
")",
":",
"loglevel",
"=",
"{",
"0",
":",
"logging",
".",
"ERROR",
",",
"1",
":",
"logging",
".",
"WARNING",
",",
"2",
":",
"logging",
".",
"INFO",
",",
"3",
":",
"logging",
".",
"DEBUG",
",",
"}",
".",
"get",
"(",
"namespace",
".",
"verbosity",
",",
"logging",
".",
"DEBUG",
")",
"if",
"namespace",
".",
"verbosity",
">",
"1",
":",
"logformat",
"=",
"'%(levelname)s csvpandas %(lineno)s %(message)s'",
"else",
":",
"logformat",
"=",
"'csvpandas %(message)s'",
"logging",
".",
"basicConfig",
"(",
"stream",
"=",
"namespace",
".",
"log",
",",
"format",
"=",
"logformat",
",",
"level",
"=",
"loglevel",
")"
]
| 25.5 | 19.166667 |
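A sketch of driving `setup_logging` from an argparse namespace; the `-v` and `--log` flag names are an assumption about the surrounding CLI, not taken from the row.

```python
import argparse
import logging
import sys

parser = argparse.ArgumentParser()
parser.add_argument('-v', dest='verbosity', action='count', default=0)
parser.add_argument('--log', type=argparse.FileType('w'), default=sys.stderr)
namespace = parser.parse_args(['-vv'])

setup_logging(namespace)          # verbosity 2 maps to logging.INFO
logging.info('shown at -vv')
logging.debug('suppressed at -vv')
```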
def authenticate(self, req_data, key=None):
"""
Authenticates a given request data by verifying signatures from
        any registered authenticators. If the request is a query, this returns
        immediately; if no registered authenticator can authenticate, an
        exception is raised.
:param req_data:
:return:
"""
identifiers = set()
typ = req_data.get(OPERATION, {}).get(TXN_TYPE)
if key and self._check_and_verify_existing_req(req_data, key):
return self._verified_reqs[key]['identifiers']
for authenticator in self._authenticators:
if authenticator.is_query(typ):
return set()
if not (authenticator.is_write(typ) or
authenticator.is_action(typ)):
continue
rv = authenticator.authenticate(deepcopy(req_data)) or set()
identifiers.update(rv)
if not identifiers:
raise NoAuthenticatorFound
if key:
self._verified_reqs[key] = {'signature': req_data.get(f.SIG.nm)}
self._verified_reqs[key]['identifiers'] = identifiers
return identifiers | [
"def",
"authenticate",
"(",
"self",
",",
"req_data",
",",
"key",
"=",
"None",
")",
":",
"identifiers",
"=",
"set",
"(",
")",
"typ",
"=",
"req_data",
".",
"get",
"(",
"OPERATION",
",",
"{",
"}",
")",
".",
"get",
"(",
"TXN_TYPE",
")",
"if",
"key",
"and",
"self",
".",
"_check_and_verify_existing_req",
"(",
"req_data",
",",
"key",
")",
":",
"return",
"self",
".",
"_verified_reqs",
"[",
"key",
"]",
"[",
"'identifiers'",
"]",
"for",
"authenticator",
"in",
"self",
".",
"_authenticators",
":",
"if",
"authenticator",
".",
"is_query",
"(",
"typ",
")",
":",
"return",
"set",
"(",
")",
"if",
"not",
"(",
"authenticator",
".",
"is_write",
"(",
"typ",
")",
"or",
"authenticator",
".",
"is_action",
"(",
"typ",
")",
")",
":",
"continue",
"rv",
"=",
"authenticator",
".",
"authenticate",
"(",
"deepcopy",
"(",
"req_data",
")",
")",
"or",
"set",
"(",
")",
"identifiers",
".",
"update",
"(",
"rv",
")",
"if",
"not",
"identifiers",
":",
"raise",
"NoAuthenticatorFound",
"if",
"key",
":",
"self",
".",
"_verified_reqs",
"[",
"key",
"]",
"=",
"{",
"'signature'",
":",
"req_data",
".",
"get",
"(",
"f",
".",
"SIG",
".",
"nm",
")",
"}",
"self",
".",
"_verified_reqs",
"[",
"key",
"]",
"[",
"'identifiers'",
"]",
"=",
"identifiers",
"return",
"identifiers"
]
| 40 | 18.068966 |
def detect_state_variable_shadowing(contracts):
"""
Detects all overshadowing and overshadowed state variables in the provided contracts.
:param contracts: The contracts to detect shadowing within.
:return: Returns a set of tuples (overshadowing_contract, overshadowing_state_var, overshadowed_contract,
overshadowed_state_var).
The contract-variable pair's variable does not need to be defined in its paired contract, it may have been
inherited. The contracts are simply included to denote the immediate inheritance path from which the shadowed
variable originates.
"""
results = set()
for contract in contracts:
variables_declared = {variable.name: variable for variable in contract.variables
if variable.contract == contract}
for immediate_base_contract in contract.immediate_inheritance:
for variable in immediate_base_contract.variables:
if variable.name in variables_declared:
results.add((contract, variables_declared[variable.name], immediate_base_contract, variable))
return results | [
"def",
"detect_state_variable_shadowing",
"(",
"contracts",
")",
":",
"results",
"=",
"set",
"(",
")",
"for",
"contract",
"in",
"contracts",
":",
"variables_declared",
"=",
"{",
"variable",
".",
"name",
":",
"variable",
"for",
"variable",
"in",
"contract",
".",
"variables",
"if",
"variable",
".",
"contract",
"==",
"contract",
"}",
"for",
"immediate_base_contract",
"in",
"contract",
".",
"immediate_inheritance",
":",
"for",
"variable",
"in",
"immediate_base_contract",
".",
"variables",
":",
"if",
"variable",
".",
"name",
"in",
"variables_declared",
":",
"results",
".",
"add",
"(",
"(",
"contract",
",",
"variables_declared",
"[",
"variable",
".",
"name",
"]",
",",
"immediate_base_contract",
",",
"variable",
")",
")",
"return",
"results"
]
| 58.684211 | 30.684211 |
def file_or_default(path, default, function = None):
""" Return a default value if a file does not exist """
try:
result = file_get_contents(path)
if function != None: return function(result)
return result
except IOError as e:
if e.errno == errno.ENOENT: return default
raise | [
"def",
"file_or_default",
"(",
"path",
",",
"default",
",",
"function",
"=",
"None",
")",
":",
"try",
":",
"result",
"=",
"file_get_contents",
"(",
"path",
")",
"if",
"function",
"!=",
"None",
":",
"return",
"function",
"(",
"result",
")",
"return",
"result",
"except",
"IOError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"return",
"default",
"raise"
]
| 35.444444 | 14.222222 |
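A usage sketch; it relies on the companion helper `file_get_contents(path)` that the body above calls, and the paths and defaults are placeholders.

```python
import json

# fall back to an empty dict if the config file is missing
config = file_or_default('/tmp/missing-config.json', {}, json.loads)
print(config)  # {} when the file does not exist

# without a post-processing function the raw text (or the default) is returned
banner = file_or_default('/etc/motd', 'no message of the day')
print(banner)
```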
def from_str(cls, tagstring):
"""Create a tag by parsing the tag of a message
:param tagstring: A tag string described in the irc protocol
:type tagstring: :class:`str`
:returns: A tag
:rtype: :class:`Tag`
:raises: None
"""
m = cls._parse_regexp.match(tagstring)
return cls(name=m.group('name'), value=m.group('value'), vendor=m.group('vendor')) | [
"def",
"from_str",
"(",
"cls",
",",
"tagstring",
")",
":",
"m",
"=",
"cls",
".",
"_parse_regexp",
".",
"match",
"(",
"tagstring",
")",
"return",
"cls",
"(",
"name",
"=",
"m",
".",
"group",
"(",
"'name'",
")",
",",
"value",
"=",
"m",
".",
"group",
"(",
"'value'",
")",
",",
"vendor",
"=",
"m",
".",
"group",
"(",
"'vendor'",
")",
")"
]
| 37.090909 | 16.909091 |
def get(self, path, data=None, return_fields=None):
"""Call the Infoblox device to get the obj for the data passed in
        :param str path: The path for the request
:param dict data: The data for the get request
:rtype: requests.Response
"""
return self.session.get(self._request_url(path, return_fields),
data=json.dumps(data),
auth=self.auth, verify=False) | [
"def",
"get",
"(",
"self",
",",
"path",
",",
"data",
"=",
"None",
",",
"return_fields",
"=",
"None",
")",
":",
"return",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"_request_url",
"(",
"path",
",",
"return_fields",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"auth",
"=",
"self",
".",
"auth",
",",
"verify",
"=",
"False",
")"
]
| 42.454545 | 17.909091 |
def list_objects(self, bucket_name=None, **kwargs):
"""
This method is primarily for illustration and just calls the
boto3 client implementation of list_objects but is a common task
for first time Predix BlobStore users.
"""
if not bucket_name: bucket_name = self.bucket_name
return self.client.list_objects(Bucket=bucket_name, **kwargs) | [
"def",
"list_objects",
"(",
"self",
",",
"bucket_name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"bucket_name",
":",
"bucket_name",
"=",
"self",
".",
"bucket_name",
"return",
"self",
".",
"client",
".",
"list_objects",
"(",
"Bucket",
"=",
"bucket_name",
",",
"*",
"*",
"kwargs",
")"
]
| 48.375 | 15.625 |
def _configure_manager(self):
"""
Creates a manager to handle autoscale operations.
"""
self._manager = ScalingGroupManager(self,
resource_class=ScalingGroup, response_key="group",
uri_base="groups") | [
"def",
"_configure_manager",
"(",
"self",
")",
":",
"self",
".",
"_manager",
"=",
"ScalingGroupManager",
"(",
"self",
",",
"resource_class",
"=",
"ScalingGroup",
",",
"response_key",
"=",
"\"group\"",
",",
"uri_base",
"=",
"\"groups\"",
")"
]
| 36.714286 | 9.857143 |
def install_paths(version=None, iddname=None):
"""Get the install paths for EnergyPlus executable and weather files.
We prefer to get the install path from the IDD name but fall back to
getting it from the version number for backwards compatibility and to
simplify tests.
Parameters
----------
version : str, optional
EnergyPlus version in the format "X-X-X", e.g. "8-7-0".
iddname : str, optional
File path to the IDD.
Returns
-------
eplus_exe : str
Full path to the EnergyPlus executable.
eplus_weather : str
Full path to the EnergyPlus weather directory.
"""
try:
eplus_exe, eplus_home = paths_from_iddname(iddname)
except (AttributeError, TypeError, ValueError):
eplus_exe, eplus_home = paths_from_version(version)
eplus_weather = os.path.join(eplus_home, 'WeatherData')
return eplus_exe, eplus_weather | [
"def",
"install_paths",
"(",
"version",
"=",
"None",
",",
"iddname",
"=",
"None",
")",
":",
"try",
":",
"eplus_exe",
",",
"eplus_home",
"=",
"paths_from_iddname",
"(",
"iddname",
")",
"except",
"(",
"AttributeError",
",",
"TypeError",
",",
"ValueError",
")",
":",
"eplus_exe",
",",
"eplus_home",
"=",
"paths_from_version",
"(",
"version",
")",
"eplus_weather",
"=",
"os",
".",
"path",
".",
"join",
"(",
"eplus_home",
",",
"'WeatherData'",
")",
"return",
"eplus_exe",
",",
"eplus_weather"
]
| 31.034483 | 21.586207 |
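A short usage sketch for the function above; the version string follows the documented "X-X-X" format and the returned paths depend on the local EnergyPlus installation.

# Sketch: resolve executable and weather-data paths for EnergyPlus 8.7.0.
eplus_exe, eplus_weather = install_paths(version="8-7-0")
print(eplus_exe)      # full path to the EnergyPlus executable
print(eplus_weather)  # full path to the WeatherData directory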
def get_draft(self):
"""
Return self if this object is a draft, otherwise return the draft
copy of a published item.
"""
if self.is_draft:
return self
elif self.is_published:
draft = self.publishing_draft
# Previously the reverse relation could be `DraftItemBoobyTrapped`
# in some cases. This should be fixed by extra monkey-patching of
# the `publishing_draft` field in icekit.publishing.apps, but we
# will leave this extra sanity check here just in case.
if hasattr(draft, 'get_draft_payload'):
draft = draft.get_draft_payload()
return draft
raise ValueError( # pragma: no cover
"Publishable object %r is neither draft nor published" % self) | [
"def",
"get_draft",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_draft",
":",
"return",
"self",
"elif",
"self",
".",
"is_published",
":",
"draft",
"=",
"self",
".",
"publishing_draft",
"# Previously the reverse relation could be `DraftItemBoobyTrapped`",
"# in some cases. This should be fixed by extra monkey-patching of",
"# the `publishing_draft` field in icekit.publishing.apps, but we",
"# will leave this extra sanity check here just in case.",
"if",
"hasattr",
"(",
"draft",
",",
"'get_draft_payload'",
")",
":",
"draft",
"=",
"draft",
".",
"get_draft_payload",
"(",
")",
"return",
"draft",
"raise",
"ValueError",
"(",
"# pragma: no cover",
"\"Publishable object %r is neither draft nor published\"",
"%",
"self",
")"
]
| 44.944444 | 17.5 |
def remove_slp(img, gstd1=GSTD1, gstd2=GSTD2, gstd3=GSTD3, ksize=KSIZE, w=W):
"""Remove the SLP from kinect IR image
The input image should be a float32 numpy array, and should NOT be a square root image
Parameters
------------------
img : (M, N) float ndarray
Kinect NIR image with SLP pattern
gstd1 : float
Standard deviation of gaussian kernel 1
gstd2 : float
Standard deviation of gaussian kernel 2
gstd3 : float
Standard deviation of gaussian kernel 3
ksize : int
Size of kernel (should be odd)
w : float
Weighting factor
Returns
------------------
img_noslp : (M,N) float ndarray
Input image with SLP removed
"""
gf1 = cv2.getGaussianKernel(ksize, gstd1)
gf2 = cv2.getGaussianKernel(ksize, gstd2)
gf3 = cv2.getGaussianKernel(ksize, gstd3)
sqrtimg = cv2.sqrt(img)
p1 = cv2.sepFilter2D(sqrtimg, -1, gf1, gf1)
p2 = cv2.sepFilter2D(sqrtimg, -1, gf2, gf2)
maxarr = np.maximum(0, (p1 - p2) / p2)
minarr = np.minimum(w * maxarr, 1)
p = 1 - minarr
nc = cv2.sepFilter2D(p, -1, gf3, gf3) + EPS
output = cv2.sepFilter2D(p*sqrtimg, -1, gf3, gf3)
output = (output / nc) ** 2 # Since input is sqrted
return output | [
"def",
"remove_slp",
"(",
"img",
",",
"gstd1",
"=",
"GSTD1",
",",
"gstd2",
"=",
"GSTD2",
",",
"gstd3",
"=",
"GSTD3",
",",
"ksize",
"=",
"KSIZE",
",",
"w",
"=",
"W",
")",
":",
"gf1",
"=",
"cv2",
".",
"getGaussianKernel",
"(",
"ksize",
",",
"gstd1",
")",
"gf2",
"=",
"cv2",
".",
"getGaussianKernel",
"(",
"ksize",
",",
"gstd2",
")",
"gf3",
"=",
"cv2",
".",
"getGaussianKernel",
"(",
"ksize",
",",
"gstd3",
")",
"sqrtimg",
"=",
"cv2",
".",
"sqrt",
"(",
"img",
")",
"p1",
"=",
"cv2",
".",
"sepFilter2D",
"(",
"sqrtimg",
",",
"-",
"1",
",",
"gf1",
",",
"gf1",
")",
"p2",
"=",
"cv2",
".",
"sepFilter2D",
"(",
"sqrtimg",
",",
"-",
"1",
",",
"gf2",
",",
"gf2",
")",
"maxarr",
"=",
"np",
".",
"maximum",
"(",
"0",
",",
"(",
"p1",
"-",
"p2",
")",
"/",
"p2",
")",
"minarr",
"=",
"np",
".",
"minimum",
"(",
"w",
"*",
"maxarr",
",",
"1",
")",
"p",
"=",
"1",
"-",
"minarr",
"nc",
"=",
"cv2",
".",
"sepFilter2D",
"(",
"p",
",",
"-",
"1",
",",
"gf3",
",",
"gf3",
")",
"+",
"EPS",
"output",
"=",
"cv2",
".",
"sepFilter2D",
"(",
"p",
"*",
"sqrtimg",
",",
"-",
"1",
",",
"gf3",
",",
"gf3",
")",
"output",
"=",
"(",
"output",
"/",
"nc",
")",
"**",
"2",
"# Since input is sqrted",
"return",
"output"
]
| 33.5 | 15.842105 |
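A usage sketch for the filter above; the file name is hypothetical, and the frame is cast to float32 (not square-rooted) as the docstring requires.

import cv2
import numpy as np

# Hypothetical input file; any Kinect NIR frame loaded as float32 works.
ir = cv2.imread("kinect_ir.png", cv2.IMREAD_ANYDEPTH).astype(np.float32)
cleaned = remove_slp(ir)  # SLP dot pattern suppressed, same shape as input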
def validate_instance(self, instance, import_validation_errors=None, validate_unique=True):
"""
Takes any validation errors that were raised by
:meth:`~import_export.resources.Resource.import_obj`, and combines them
with validation errors raised by the instance's ``full_clean()``
        method. The combined errors are then re-raised as a single, multi-field
ValidationError.
        If the ``clean_model_instances`` option is False, the instance's
``full_clean()`` method is not called, and only the errors raised by
``import_obj()`` are re-raised.
"""
if import_validation_errors is None:
errors = {}
else:
errors = import_validation_errors.copy()
if self._meta.clean_model_instances:
try:
instance.full_clean(
exclude=errors.keys(),
validate_unique=validate_unique,
)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors) | [
"def",
"validate_instance",
"(",
"self",
",",
"instance",
",",
"import_validation_errors",
"=",
"None",
",",
"validate_unique",
"=",
"True",
")",
":",
"if",
"import_validation_errors",
"is",
"None",
":",
"errors",
"=",
"{",
"}",
"else",
":",
"errors",
"=",
"import_validation_errors",
".",
"copy",
"(",
")",
"if",
"self",
".",
"_meta",
".",
"clean_model_instances",
":",
"try",
":",
"instance",
".",
"full_clean",
"(",
"exclude",
"=",
"errors",
".",
"keys",
"(",
")",
",",
"validate_unique",
"=",
"validate_unique",
",",
")",
"except",
"ValidationError",
"as",
"e",
":",
"errors",
"=",
"e",
".",
"update_error_dict",
"(",
"errors",
")",
"if",
"errors",
":",
"raise",
"ValidationError",
"(",
"errors",
")"
]
| 40.666667 | 18.666667 |
def get_build_report(self, project, build_id, type=None):
"""GetBuildReport.
[Preview API] Gets a build report.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param str type:
:rtype: :class:`<BuildReportMetadata> <azure.devops.v5_0.build.models.BuildReportMetadata>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if type is not None:
query_parameters['type'] = self._serialize.query('type', type, 'str')
response = self._send(http_method='GET',
location_id='45bcaa88-67e1-4042-a035-56d3b4a7d44c',
version='5.0-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('BuildReportMetadata', response) | [
"def",
"get_build_report",
"(",
"self",
",",
"project",
",",
"build_id",
",",
"type",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"if",
"build_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'buildId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'build_id'",
",",
"build_id",
",",
"'int'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"type",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'type'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'type'",
",",
"type",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'45bcaa88-67e1-4042-a035-56d3b4a7d44c'",
",",
"version",
"=",
"'5.0-preview.2'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'BuildReportMetadata'",
",",
"response",
")"
]
| 51.181818 | 19.590909 |
def update_score_summary(sender, **kwargs):
"""
Listen for new Scores and update the relevant ScoreSummary.
Args:
sender: not used
Kwargs:
instance (Score): The score model whose save triggered this receiver.
"""
score = kwargs['instance']
try:
score_summary = ScoreSummary.objects.get(
student_item=score.student_item
)
score_summary.latest = score
# A score with the "reset" flag set will always replace the current highest score
if score.reset:
score_summary.highest = score
# The conversion to a float may return None if points possible is zero
# In Python, None is always less than an integer, so any score
# with non-null points possible will take precedence.
elif score.to_float() > score_summary.highest.to_float():
score_summary.highest = score
score_summary.save()
except ScoreSummary.DoesNotExist:
ScoreSummary.objects.create(
student_item=score.student_item,
highest=score,
latest=score,
)
except DatabaseError as err:
logger.exception(
u"Error while updating score summary for student item {}"
.format(score.student_item)
) | [
"def",
"update_score_summary",
"(",
"sender",
",",
"*",
"*",
"kwargs",
")",
":",
"score",
"=",
"kwargs",
"[",
"'instance'",
"]",
"try",
":",
"score_summary",
"=",
"ScoreSummary",
".",
"objects",
".",
"get",
"(",
"student_item",
"=",
"score",
".",
"student_item",
")",
"score_summary",
".",
"latest",
"=",
"score",
"# A score with the \"reset\" flag set will always replace the current highest score",
"if",
"score",
".",
"reset",
":",
"score_summary",
".",
"highest",
"=",
"score",
"# The conversion to a float may return None if points possible is zero",
"# In Python, None is always less than an integer, so any score",
"# with non-null points possible will take precedence.",
"elif",
"score",
".",
"to_float",
"(",
")",
">",
"score_summary",
".",
"highest",
".",
"to_float",
"(",
")",
":",
"score_summary",
".",
"highest",
"=",
"score",
"score_summary",
".",
"save",
"(",
")",
"except",
"ScoreSummary",
".",
"DoesNotExist",
":",
"ScoreSummary",
".",
"objects",
".",
"create",
"(",
"student_item",
"=",
"score",
".",
"student_item",
",",
"highest",
"=",
"score",
",",
"latest",
"=",
"score",
",",
")",
"except",
"DatabaseError",
"as",
"err",
":",
"logger",
".",
"exception",
"(",
"u\"Error while updating score summary for student item {}\"",
".",
"format",
"(",
"score",
".",
"student_item",
")",
")"
]
| 36.710526 | 19.078947 |
def pkg_contents(self):
"""Print packages contents
"""
packages = self.args[1:]
options = [
"-d",
"--display"
]
if len(self.args) > 1 and self.args[0] in options:
PackageManager(packages).display()
else:
usage("") | [
"def",
"pkg_contents",
"(",
"self",
")",
":",
"packages",
"=",
"self",
".",
"args",
"[",
"1",
":",
"]",
"options",
"=",
"[",
"\"-d\"",
",",
"\"--display\"",
"]",
"if",
"len",
"(",
"self",
".",
"args",
")",
">",
"1",
"and",
"self",
".",
"args",
"[",
"0",
"]",
"in",
"options",
":",
"PackageManager",
"(",
"packages",
")",
".",
"display",
"(",
")",
"else",
":",
"usage",
"(",
"\"\"",
")"
]
| 25.5 | 15.583333 |
def _get_stack_frame(stacklevel):
"""
    utility function to get a stackframe, skipping internal frames.
"""
stacklevel = stacklevel + 1
if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)):
# If frame is too small to care or if the warning originated in
# internal code, then do not try to hide any frames.
frame = sys._getframe(stacklevel)
else:
frame = sys._getframe(1)
# Look for one frame less since the above line starts us off.
for x in range(stacklevel-1):
frame = _next_external_frame(frame)
if frame is None:
raise ValueError
return frame | [
"def",
"_get_stack_frame",
"(",
"stacklevel",
")",
":",
"stacklevel",
"=",
"stacklevel",
"+",
"1",
"if",
"stacklevel",
"<=",
"1",
"or",
"_is_internal_frame",
"(",
"sys",
".",
"_getframe",
"(",
"1",
")",
")",
":",
"# If frame is too small to care or if the warning originated in",
"# internal code, then do not try to hide any frames.",
"frame",
"=",
"sys",
".",
"_getframe",
"(",
"stacklevel",
")",
"else",
":",
"frame",
"=",
"sys",
".",
"_getframe",
"(",
"1",
")",
"# Look for one frame less since the above line starts us off.",
"for",
"x",
"in",
"range",
"(",
"stacklevel",
"-",
"1",
")",
":",
"frame",
"=",
"_next_external_frame",
"(",
"frame",
")",
"if",
"frame",
"is",
"None",
":",
"raise",
"ValueError",
"return",
"frame"
]
| 38.352941 | 14.117647 |
def kp_pan_zoom_set(self, viewer, event, data_x, data_y, msg=True):
"""Sets the pan position under the cursor."""
if self.canpan:
reg = 1
with viewer.suppress_redraw:
viewer.panset_xy(data_x, data_y)
scale_x, scale_y = self._save.get((viewer, 'scale', reg),
(1.0, 1.0))
viewer.scale_to(scale_x, scale_y)
return True | [
"def",
"kp_pan_zoom_set",
"(",
"self",
",",
"viewer",
",",
"event",
",",
"data_x",
",",
"data_y",
",",
"msg",
"=",
"True",
")",
":",
"if",
"self",
".",
"canpan",
":",
"reg",
"=",
"1",
"with",
"viewer",
".",
"suppress_redraw",
":",
"viewer",
".",
"panset_xy",
"(",
"data_x",
",",
"data_y",
")",
"scale_x",
",",
"scale_y",
"=",
"self",
".",
"_save",
".",
"get",
"(",
"(",
"viewer",
",",
"'scale'",
",",
"reg",
")",
",",
"(",
"1.0",
",",
"1.0",
")",
")",
"viewer",
".",
"scale_to",
"(",
"scale_x",
",",
"scale_y",
")",
"return",
"True"
]
| 45.2 | 15.7 |
def make_env(env_type, real_env, sim_env_kwargs):
"""Factory function for envs."""
return {
"real": lambda: real_env.new_like( # pylint: disable=g-long-lambda
batch_size=sim_env_kwargs["batch_size"],
store_rollouts=False,
),
"simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames( # pylint: disable=g-long-lambda
**sim_env_kwargs
),
}[env_type]() | [
"def",
"make_env",
"(",
"env_type",
",",
"real_env",
",",
"sim_env_kwargs",
")",
":",
"return",
"{",
"\"real\"",
":",
"lambda",
":",
"real_env",
".",
"new_like",
"(",
"# pylint: disable=g-long-lambda",
"batch_size",
"=",
"sim_env_kwargs",
"[",
"\"batch_size\"",
"]",
",",
"store_rollouts",
"=",
"False",
",",
")",
",",
"\"simulated\"",
":",
"lambda",
":",
"rl_utils",
".",
"SimulatedBatchGymEnvWithFixedInitialFrames",
"(",
"# pylint: disable=g-long-lambda",
"*",
"*",
"sim_env_kwargs",
")",
",",
"}",
"[",
"env_type",
"]",
"(",
")"
]
| 37.818182 | 24.181818 |
def name(self):
"""str: name of the file entry, which does not include the full path.
Raises:
BackEndError: if pytsk3 returns a non UTF-8 formatted name.
"""
if self._name is None:
# If pytsk3.FS_Info.open() was used file.info has an attribute name
# (pytsk3.TSK_FS_FILE) that contains the name string. Otherwise the
# name from the path specification is used.
if getattr(self._tsk_file.info, 'name', None) is not None:
name = getattr(self._tsk_file.info.name, 'name', None)
try:
# pytsk3 returns an UTF-8 encoded byte string.
self._name = name.decode('utf8')
except UnicodeError:
raise errors.BackEndError(
'pytsk3 returned a non UTF-8 formatted name.')
else:
location = getattr(self.path_spec, 'location', None)
if location:
self._name = self._file_system.BasenamePath(location)
return self._name | [
"def",
"name",
"(",
"self",
")",
":",
"if",
"self",
".",
"_name",
"is",
"None",
":",
"# If pytsk3.FS_Info.open() was used file.info has an attribute name",
"# (pytsk3.TSK_FS_FILE) that contains the name string. Otherwise the",
"# name from the path specification is used.",
"if",
"getattr",
"(",
"self",
".",
"_tsk_file",
".",
"info",
",",
"'name'",
",",
"None",
")",
"is",
"not",
"None",
":",
"name",
"=",
"getattr",
"(",
"self",
".",
"_tsk_file",
".",
"info",
".",
"name",
",",
"'name'",
",",
"None",
")",
"try",
":",
"# pytsk3 returns an UTF-8 encoded byte string.",
"self",
".",
"_name",
"=",
"name",
".",
"decode",
"(",
"'utf8'",
")",
"except",
"UnicodeError",
":",
"raise",
"errors",
".",
"BackEndError",
"(",
"'pytsk3 returned a non UTF-8 formatted name.'",
")",
"else",
":",
"location",
"=",
"getattr",
"(",
"self",
".",
"path_spec",
",",
"'location'",
",",
"None",
")",
"if",
"location",
":",
"self",
".",
"_name",
"=",
"self",
".",
"_file_system",
".",
"BasenamePath",
"(",
"location",
")",
"return",
"self",
".",
"_name"
]
| 35.653846 | 21.807692 |
def relative_noise_size(self, data, noise):
'''
:data: original data as numpy matrix
:noise: noise matrix as numpy matrix
'''
return np.mean([
sci_dist.cosine(u / la.norm(u), v / la.norm(v))
for u, v in zip(noise, data)
]) | [
"def",
"relative_noise_size",
"(",
"self",
",",
"data",
",",
"noise",
")",
":",
"return",
"np",
".",
"mean",
"(",
"[",
"sci_dist",
".",
"cosine",
"(",
"u",
"/",
"la",
".",
"norm",
"(",
"u",
")",
",",
"v",
"/",
"la",
".",
"norm",
"(",
"v",
")",
")",
"for",
"u",
",",
"v",
"in",
"zip",
"(",
"noise",
",",
"data",
")",
"]",
")"
]
| 31.777778 | 14.888889 |
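The method above averages the cosine distance between row-normalized noise and data vectors; a standalone numeric sketch of the same computation, with hypothetical 2-D arrays:

import numpy as np
from numpy import linalg as la
from scipy.spatial import distance as sci_dist

data = np.array([[1.0, 0.0], [0.0, 1.0]])
noise = np.array([[1.0, 0.1], [0.1, 1.0]])
# identical to relative_noise_size(data, noise) above
val = np.mean([sci_dist.cosine(u / la.norm(u), v / la.norm(v))
               for u, v in zip(noise, data)])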
def close(self):
"""
Write final shp, shx, and dbf headers, close opened files.
"""
# Check if any of the files have already been closed
shp_open = self.shp and not (hasattr(self.shp, 'closed') and self.shp.closed)
shx_open = self.shx and not (hasattr(self.shx, 'closed') and self.shx.closed)
dbf_open = self.dbf and not (hasattr(self.dbf, 'closed') and self.dbf.closed)
# Balance if already not balanced
if self.shp and shp_open and self.dbf and dbf_open:
if self.autoBalance:
self.balance()
if self.recNum != self.shpNum:
raise ShapefileException("When saving both the dbf and shp file, "
"the number of records (%s) must correspond "
"with the number of shapes (%s)" % (self.recNum, self.shpNum))
# Fill in the blank headers
if self.shp and shp_open:
self.__shapefileHeader(self.shp, headerType='shp')
if self.shx and shx_open:
self.__shapefileHeader(self.shx, headerType='shx')
# Update the dbf header with final length etc
if self.dbf and dbf_open:
self.__dbfHeader()
# Close files, if target is a filepath
if self.target:
for attribute in (self.shp, self.shx, self.dbf):
if hasattr(attribute, 'close'):
try:
attribute.close()
except IOError:
pass | [
"def",
"close",
"(",
"self",
")",
":",
"# Check if any of the files have already been closed\r",
"shp_open",
"=",
"self",
".",
"shp",
"and",
"not",
"(",
"hasattr",
"(",
"self",
".",
"shp",
",",
"'closed'",
")",
"and",
"self",
".",
"shp",
".",
"closed",
")",
"shx_open",
"=",
"self",
".",
"shx",
"and",
"not",
"(",
"hasattr",
"(",
"self",
".",
"shx",
",",
"'closed'",
")",
"and",
"self",
".",
"shx",
".",
"closed",
")",
"dbf_open",
"=",
"self",
".",
"dbf",
"and",
"not",
"(",
"hasattr",
"(",
"self",
".",
"dbf",
",",
"'closed'",
")",
"and",
"self",
".",
"dbf",
".",
"closed",
")",
"# Balance if already not balanced\r",
"if",
"self",
".",
"shp",
"and",
"shp_open",
"and",
"self",
".",
"dbf",
"and",
"dbf_open",
":",
"if",
"self",
".",
"autoBalance",
":",
"self",
".",
"balance",
"(",
")",
"if",
"self",
".",
"recNum",
"!=",
"self",
".",
"shpNum",
":",
"raise",
"ShapefileException",
"(",
"\"When saving both the dbf and shp file, \"",
"\"the number of records (%s) must correspond \"",
"\"with the number of shapes (%s)\"",
"%",
"(",
"self",
".",
"recNum",
",",
"self",
".",
"shpNum",
")",
")",
"# Fill in the blank headers\r",
"if",
"self",
".",
"shp",
"and",
"shp_open",
":",
"self",
".",
"__shapefileHeader",
"(",
"self",
".",
"shp",
",",
"headerType",
"=",
"'shp'",
")",
"if",
"self",
".",
"shx",
"and",
"shx_open",
":",
"self",
".",
"__shapefileHeader",
"(",
"self",
".",
"shx",
",",
"headerType",
"=",
"'shx'",
")",
"# Update the dbf header with final length etc\r",
"if",
"self",
".",
"dbf",
"and",
"dbf_open",
":",
"self",
".",
"__dbfHeader",
"(",
")",
"# Close files, if target is a filepath\r",
"if",
"self",
".",
"target",
":",
"for",
"attribute",
"in",
"(",
"self",
".",
"shp",
",",
"self",
".",
"shx",
",",
"self",
".",
"dbf",
")",
":",
"if",
"hasattr",
"(",
"attribute",
",",
"'close'",
")",
":",
"try",
":",
"attribute",
".",
"close",
"(",
")",
"except",
"IOError",
":",
"pass"
]
| 45.285714 | 19.571429 |
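The method above closes out a shapefile Writer; a minimal end-to-end sketch, assuming the pyshp 2.x Writer API (field/point/record), where close() writes the final headers:

import shapefile  # pyshp

w = shapefile.Writer("points")   # creates points.shp/.shx/.dbf
w.field("NAME", "C")
w.point(10.0, 20.0)
w.record("first")
w.close()  # balances records, writes the shp/shx/dbf headers, closes the files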
def locate_fixed_differences(ac1, ac2):
"""Locate variants with no shared alleles between two populations.
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
Returns
-------
loc : ndarray, bool, shape (n_variants,)
See Also
--------
allel.stats.diversity.windowed_df
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
... [[0, 1], [0, 1], [0, 1], [0, 1]],
... [[0, 1], [0, 1], [1, 1], [1, 1]],
... [[0, 0], [0, 0], [1, 1], [2, 2]],
... [[0, 0], [-1, -1], [1, 1], [-1, -1]]])
>>> ac1 = g.count_alleles(subpop=[0, 1])
>>> ac2 = g.count_alleles(subpop=[2, 3])
>>> loc_df = allel.locate_fixed_differences(ac1, ac2)
>>> loc_df
array([ True, False, False, True, True])
"""
# check inputs
ac1 = asarray_ndim(ac1, 2)
ac2 = asarray_ndim(ac2, 2)
check_dim0_aligned(ac1, ac2)
ac1, ac2 = ensure_dim1_aligned(ac1, ac2)
# stack allele counts for convenience
pac = np.dstack([ac1, ac2])
# count numbers of alleles called in each population
pan = np.sum(pac, axis=1)
# count the numbers of populations with each allele
npa = np.sum(pac > 0, axis=2)
# locate variants with allele calls in both populations
non_missing = np.all(pan > 0, axis=1)
# locate variants where all alleles are only found in a single population
no_shared_alleles = np.all(npa <= 1, axis=1)
return non_missing & no_shared_alleles | [
"def",
"locate_fixed_differences",
"(",
"ac1",
",",
"ac2",
")",
":",
"# check inputs",
"ac1",
"=",
"asarray_ndim",
"(",
"ac1",
",",
"2",
")",
"ac2",
"=",
"asarray_ndim",
"(",
"ac2",
",",
"2",
")",
"check_dim0_aligned",
"(",
"ac1",
",",
"ac2",
")",
"ac1",
",",
"ac2",
"=",
"ensure_dim1_aligned",
"(",
"ac1",
",",
"ac2",
")",
"# stack allele counts for convenience",
"pac",
"=",
"np",
".",
"dstack",
"(",
"[",
"ac1",
",",
"ac2",
"]",
")",
"# count numbers of alleles called in each population",
"pan",
"=",
"np",
".",
"sum",
"(",
"pac",
",",
"axis",
"=",
"1",
")",
"# count the numbers of populations with each allele",
"npa",
"=",
"np",
".",
"sum",
"(",
"pac",
">",
"0",
",",
"axis",
"=",
"2",
")",
"# locate variants with allele calls in both populations",
"non_missing",
"=",
"np",
".",
"all",
"(",
"pan",
">",
"0",
",",
"axis",
"=",
"1",
")",
"# locate variants where all alleles are only found in a single population",
"no_shared_alleles",
"=",
"np",
".",
"all",
"(",
"npa",
"<=",
"1",
",",
"axis",
"=",
"1",
")",
"return",
"non_missing",
"&",
"no_shared_alleles"
]
| 30.561404 | 21.105263 |
def startDrag(self, index):
"""start a drag operation with a PandasCellPayload on defined index.
Args:
index (QModelIndex): model index you want to start the drag operation.
"""
if not index.isValid():
return
dataFrame = self.model().dataFrame()
# get all infos from dataFrame
dfindex = dataFrame.iloc[[index.row()]].index
columnName = dataFrame.columns[index.column()]
dtype = dataFrame[columnName].dtype
value = dataFrame[columnName][dfindex]
# create the mime data
mimePayload = PandasCellPayload(
dfindex,
columnName,
value,
dtype,
hex(id(self.model()))
)
mimeData = MimeData()
mimeData.setData(mimePayload)
# create the drag icon and start drag operation
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
pixmap = QtGui.QPixmap(":/icons/insert-table.png")
drag.setHotSpot(QtCore.QPoint(pixmap.width()/3, pixmap.height()/3))
drag.setPixmap(pixmap)
result = drag.start(Qt.MoveAction) | [
"def",
"startDrag",
"(",
"self",
",",
"index",
")",
":",
"if",
"not",
"index",
".",
"isValid",
"(",
")",
":",
"return",
"dataFrame",
"=",
"self",
".",
"model",
"(",
")",
".",
"dataFrame",
"(",
")",
"# get all infos from dataFrame",
"dfindex",
"=",
"dataFrame",
".",
"iloc",
"[",
"[",
"index",
".",
"row",
"(",
")",
"]",
"]",
".",
"index",
"columnName",
"=",
"dataFrame",
".",
"columns",
"[",
"index",
".",
"column",
"(",
")",
"]",
"dtype",
"=",
"dataFrame",
"[",
"columnName",
"]",
".",
"dtype",
"value",
"=",
"dataFrame",
"[",
"columnName",
"]",
"[",
"dfindex",
"]",
"# create the mime data",
"mimePayload",
"=",
"PandasCellPayload",
"(",
"dfindex",
",",
"columnName",
",",
"value",
",",
"dtype",
",",
"hex",
"(",
"id",
"(",
"self",
".",
"model",
"(",
")",
")",
")",
")",
"mimeData",
"=",
"MimeData",
"(",
")",
"mimeData",
".",
"setData",
"(",
"mimePayload",
")",
"# create the drag icon and start drag operation",
"drag",
"=",
"QtGui",
".",
"QDrag",
"(",
"self",
")",
"drag",
".",
"setMimeData",
"(",
"mimeData",
")",
"pixmap",
"=",
"QtGui",
".",
"QPixmap",
"(",
"\":/icons/insert-table.png\"",
")",
"drag",
".",
"setHotSpot",
"(",
"QtCore",
".",
"QPoint",
"(",
"pixmap",
".",
"width",
"(",
")",
"/",
"3",
",",
"pixmap",
".",
"height",
"(",
")",
"/",
"3",
")",
")",
"drag",
".",
"setPixmap",
"(",
"pixmap",
")",
"result",
"=",
"drag",
".",
"start",
"(",
"Qt",
".",
"MoveAction",
")"
]
| 31.972222 | 16.666667 |
def locked_blocks_iterator(blockfile, start_info=(0, 0), cached_headers=50, batch_size=50):
"""
This method loads blocks from disk, skipping any orphan blocks.
"""
f = blockfile
current_state = []
def change_state(bc, ops):
for op, bh, work in ops:
if op == 'add':
current_state.append(bh)
pass
else:
current_state.pop()
bc = BlockChain()
bc.add_change_callback(change_state)
bhs = []
index = 0
info_offset = start_info
while 1:
v = blockfile.next_offset(info_offset)
if v is None:
break
block_offset, info_offset = v
f.jump_to(block_offset)
bh = Block.parse_as_header(f)
bh.info = block_offset
bhs.append(bh)
if len(bhs) > batch_size:
bc.add_headers(bhs)
bhs = []
if len(current_state) > cached_headers:
for bh in current_state[:cached_headers]:
bh.index = index
yield bh
index += 1
bc.lock_to_index(index)
current_state = current_state[cached_headers:] | [
"def",
"locked_blocks_iterator",
"(",
"blockfile",
",",
"start_info",
"=",
"(",
"0",
",",
"0",
")",
",",
"cached_headers",
"=",
"50",
",",
"batch_size",
"=",
"50",
")",
":",
"f",
"=",
"blockfile",
"current_state",
"=",
"[",
"]",
"def",
"change_state",
"(",
"bc",
",",
"ops",
")",
":",
"for",
"op",
",",
"bh",
",",
"work",
"in",
"ops",
":",
"if",
"op",
"==",
"'add'",
":",
"current_state",
".",
"append",
"(",
"bh",
")",
"pass",
"else",
":",
"current_state",
".",
"pop",
"(",
")",
"bc",
"=",
"BlockChain",
"(",
")",
"bc",
".",
"add_change_callback",
"(",
"change_state",
")",
"bhs",
"=",
"[",
"]",
"index",
"=",
"0",
"info_offset",
"=",
"start_info",
"while",
"1",
":",
"v",
"=",
"blockfile",
".",
"next_offset",
"(",
"info_offset",
")",
"if",
"v",
"is",
"None",
":",
"break",
"block_offset",
",",
"info_offset",
"=",
"v",
"f",
".",
"jump_to",
"(",
"block_offset",
")",
"bh",
"=",
"Block",
".",
"parse_as_header",
"(",
"f",
")",
"bh",
".",
"info",
"=",
"block_offset",
"bhs",
".",
"append",
"(",
"bh",
")",
"if",
"len",
"(",
"bhs",
")",
">",
"batch_size",
":",
"bc",
".",
"add_headers",
"(",
"bhs",
")",
"bhs",
"=",
"[",
"]",
"if",
"len",
"(",
"current_state",
")",
">",
"cached_headers",
":",
"for",
"bh",
"in",
"current_state",
"[",
":",
"cached_headers",
"]",
":",
"bh",
".",
"index",
"=",
"index",
"yield",
"bh",
"index",
"+=",
"1",
"bc",
".",
"lock_to_index",
"(",
"index",
")",
"current_state",
"=",
"current_state",
"[",
"cached_headers",
":",
"]"
]
| 30 | 15.333333 |
def find_faderport_input_name(number=0):
"""
Find the MIDI input name for a connected FaderPort.
NOTE! Untested for more than one FaderPort attached.
:param number: 0 unless you've got more than one FaderPort attached.
In which case 0 is the first, 1 is the second etc
:return: Port name or None
"""
ins = [i for i in mido.get_input_names() if i.lower().startswith('faderport')]
if 0 <= number < len(ins):
return ins[number]
else:
return None | [
"def",
"find_faderport_input_name",
"(",
"number",
"=",
"0",
")",
":",
"ins",
"=",
"[",
"i",
"for",
"i",
"in",
"mido",
".",
"get_input_names",
"(",
")",
"if",
"i",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'faderport'",
")",
"]",
"if",
"0",
"<=",
"number",
"<",
"len",
"(",
"ins",
")",
":",
"return",
"ins",
"[",
"number",
"]",
"else",
":",
"return",
"None"
]
| 35.785714 | 18.5 |
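A usage sketch for the helper above; port names depend on the attached hardware, so the result is checked for None before opening the port with mido.

import mido

name = find_faderport_input_name(0)
if name is not None:
    port = mido.open_input(name)
    for msg in port.iter_pending():
        print(msg)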
def replace_version_tag(self):
"""find the next major/minor/trivial version number if applicable"""
version_tag = self.arguments.get('<version>')
special_keywords = ['current', 'latest']
if version_tag in special_keywords:
logger.error("releasing version '{}' is disallowed. Did you mean 'version upload'?".format(version_tag))
raise SystemExit(1)
placeholders = dict(major=0, minor=1, trivial=2)
placeholder = placeholders.get(version_tag)
if placeholder is None:
return version_tag
current_version = self.get_git_describe().lstrip('v')
version_numbers = current_version.split('-')[0].split('.')
version_numbers = [int(item) for item in version_numbers]
version_numbers = version_numbers[:placeholder + 1]
while len(version_numbers) < 3:
version_numbers.append(0)
version_numbers[placeholder] += 1
return '.'.join([str(item) for item in version_numbers[:2 if placeholder < 2 else 3]]) | [
"def",
"replace_version_tag",
"(",
"self",
")",
":",
"version_tag",
"=",
"self",
".",
"arguments",
".",
"get",
"(",
"'<version>'",
")",
"special_keywords",
"=",
"[",
"'current'",
",",
"'latest'",
"]",
"if",
"version_tag",
"in",
"special_keywords",
":",
"logger",
".",
"error",
"(",
"\"releasing version '{}' is disallowed. Did you mean 'version upload'?\"",
".",
"format",
"(",
"version_tag",
")",
")",
"raise",
"SystemExit",
"(",
"1",
")",
"placeholders",
"=",
"dict",
"(",
"major",
"=",
"0",
",",
"minor",
"=",
"1",
",",
"trivial",
"=",
"2",
")",
"placeholder",
"=",
"placeholders",
".",
"get",
"(",
"version_tag",
")",
"if",
"placeholder",
"is",
"None",
":",
"return",
"version_tag",
"current_version",
"=",
"self",
".",
"get_git_describe",
"(",
")",
".",
"lstrip",
"(",
"'v'",
")",
"version_numbers",
"=",
"current_version",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"version_numbers",
"=",
"[",
"int",
"(",
"item",
")",
"for",
"item",
"in",
"version_numbers",
"]",
"version_numbers",
"=",
"version_numbers",
"[",
":",
"placeholder",
"+",
"1",
"]",
"while",
"len",
"(",
"version_numbers",
")",
"<",
"3",
":",
"version_numbers",
".",
"append",
"(",
"0",
")",
"version_numbers",
"[",
"placeholder",
"]",
"+=",
"1",
"return",
"'.'",
".",
"join",
"(",
"[",
"str",
"(",
"item",
")",
"for",
"item",
"in",
"version_numbers",
"[",
":",
"2",
"if",
"placeholder",
"<",
"2",
"else",
"3",
"]",
"]",
")"
]
| 54.052632 | 16.578947 |
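The bumping logic above, extracted into a standalone sketch so the major/minor/trivial behaviour is easy to check (the version strings are hypothetical):

def bump(current, placeholder):
    # placeholder: 0 = major, 1 = minor, 2 = trivial
    nums = [int(x) for x in current.split('-')[0].split('.')][:placeholder + 1]
    while len(nums) < 3:
        nums.append(0)
    nums[placeholder] += 1
    return '.'.join(str(n) for n in nums[:2 if placeholder < 2 else 3])

assert bump("1.4.2", 0) == "2.0"
assert bump("1.4.2", 1) == "1.5"
assert bump("1.4.2", 2) == "1.4.3"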
def _parse_plan(self, match):
"""Parse a matching plan line."""
expected_tests = int(match.group("expected"))
directive = Directive(match.group("directive"))
# Only SKIP directives are allowed in the plan.
if directive.text and not directive.skip:
return Unknown()
return Plan(expected_tests, directive) | [
"def",
"_parse_plan",
"(",
"self",
",",
"match",
")",
":",
"expected_tests",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"\"expected\"",
")",
")",
"directive",
"=",
"Directive",
"(",
"match",
".",
"group",
"(",
"\"directive\"",
")",
")",
"# Only SKIP directives are allowed in the plan.",
"if",
"directive",
".",
"text",
"and",
"not",
"directive",
".",
"skip",
":",
"return",
"Unknown",
"(",
")",
"return",
"Plan",
"(",
"expected_tests",
",",
"directive",
")"
]
| 35.6 | 16.1 |
def _compare_dbs_getter(self, db):
"""Retrieve a dictionary of table_name, row count key value pairs for a DB."""
# Change DB connection if needed
if self.database != db:
self.change_db(db)
return self.count_rows_all() | [
"def",
"_compare_dbs_getter",
"(",
"self",
",",
"db",
")",
":",
"# Change DB connection if needed",
"if",
"self",
".",
"database",
"!=",
"db",
":",
"self",
".",
"change_db",
"(",
"db",
")",
"return",
"self",
".",
"count_rows_all",
"(",
")"
]
| 42.833333 | 4.833333 |
def get(self, id):
"""Get a object by id
Args:
id (int): Object id
Returns:
Object: Object with specified id
None: If object not found
"""
for obj in self.model.db:
if obj["id"] == id:
return self._cast_model(obj)
return None | [
"def",
"get",
"(",
"self",
",",
"id",
")",
":",
"for",
"obj",
"in",
"self",
".",
"model",
".",
"db",
":",
"if",
"obj",
"[",
"\"id\"",
"]",
"==",
"id",
":",
"return",
"self",
".",
"_cast_model",
"(",
"obj",
")",
"return",
"None"
]
| 24.714286 | 14.285714 |
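A standalone sketch of the lookup logic above, using a hypothetical in-memory db of dicts (the real method additionally wraps the hit in a model class via _cast_model):

db = [{"id": 1, "name": "alpha"}, {"id": 2, "name": "beta"}]

def get_by_id(db, id):
    for obj in db:
        if obj["id"] == id:
            return obj
    return None

assert get_by_id(db, 2)["name"] == "beta"
assert get_by_id(db, 99) is None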
def _learner_distributed(learn:Learner, cuda_id:int, cache_dir:PathOrStr='tmp'):
"Put `learn` on distributed training with `cuda_id`."
learn.callbacks.append(DistributedTrainer(learn, cuda_id))
learn.callbacks.append(DistributedRecorder(learn, cuda_id, cache_dir))
return learn | [
"def",
"_learner_distributed",
"(",
"learn",
":",
"Learner",
",",
"cuda_id",
":",
"int",
",",
"cache_dir",
":",
"PathOrStr",
"=",
"'tmp'",
")",
":",
"learn",
".",
"callbacks",
".",
"append",
"(",
"DistributedTrainer",
"(",
"learn",
",",
"cuda_id",
")",
")",
"learn",
".",
"callbacks",
".",
"append",
"(",
"DistributedRecorder",
"(",
"learn",
",",
"cuda_id",
",",
"cache_dir",
")",
")",
"return",
"learn"
]
| 57.8 | 27.4 |
def start(self, initializer=None, initargs=()):
'''Spawn a server process for this manager object'''
assert self._state.value == State.INITIAL
if (initializer is not None
and not hasattr(initializer, '__call__')):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = mp.Pipe(duplex=False)
# spawn process which runs a server
self._process = Process(
target=type(self)._run_server,
args=(self._registry, self._address, bytes(self._authkey),
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = mp.util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
) | [
"def",
"start",
"(",
"self",
",",
"initializer",
"=",
"None",
",",
"initargs",
"=",
"(",
")",
")",
":",
"assert",
"self",
".",
"_state",
".",
"value",
"==",
"State",
".",
"INITIAL",
"if",
"(",
"initializer",
"is",
"not",
"None",
"and",
"not",
"hasattr",
"(",
"initializer",
",",
"'__call__'",
")",
")",
":",
"raise",
"TypeError",
"(",
"'initializer must be a callable'",
")",
"# pipe over which we will retrieve address of server",
"reader",
",",
"writer",
"=",
"mp",
".",
"Pipe",
"(",
"duplex",
"=",
"False",
")",
"# spawn process which runs a server",
"self",
".",
"_process",
"=",
"Process",
"(",
"target",
"=",
"type",
"(",
"self",
")",
".",
"_run_server",
",",
"args",
"=",
"(",
"self",
".",
"_registry",
",",
"self",
".",
"_address",
",",
"bytes",
"(",
"self",
".",
"_authkey",
")",
",",
"self",
".",
"_serializer",
",",
"writer",
",",
"initializer",
",",
"initargs",
")",
",",
")",
"ident",
"=",
"':'",
".",
"join",
"(",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"self",
".",
"_process",
".",
"_identity",
")",
"self",
".",
"_process",
".",
"name",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
"+",
"'-'",
"+",
"ident",
"self",
".",
"_process",
".",
"start",
"(",
")",
"# get address of server",
"writer",
".",
"close",
"(",
")",
"self",
".",
"_address",
"=",
"reader",
".",
"recv",
"(",
")",
"reader",
".",
"close",
"(",
")",
"# register a finalizer",
"self",
".",
"_state",
".",
"value",
"=",
"State",
".",
"STARTED",
"self",
".",
"shutdown",
"=",
"mp",
".",
"util",
".",
"Finalize",
"(",
"self",
",",
"type",
"(",
"self",
")",
".",
"_finalize_manager",
",",
"args",
"=",
"(",
"self",
".",
"_process",
",",
"self",
".",
"_address",
",",
"self",
".",
"_authkey",
",",
"self",
".",
"_state",
",",
"self",
".",
"_Client",
")",
",",
"exitpriority",
"=",
"0",
")"
]
| 36.705882 | 17.764706 |
def parse_description_xml(location):
""" Extract serial number, base ip, and img url from description.xml
missing data from XML returns AttributeError
malformed XML returns ParseError
Refer to included example for URLBase and serialNumber elements
"""
class _URLBase(str):
""" Convenient access to hostname (ip) portion of the URL """
@property
def hostname(self):
return urlsplit(self).hostname
# """TODO: review error handling on xml"""
# may want to suppress ParseError in the event that it was caused
# by a none bridge device although this seems unlikely
try:
xml_str = from_url(location)
except urllib.request.HTTPError as error:
logger.info("No description for %s: %s", location, error)
return None, error
except urllib.request.URLError as error:
logger.info("No HTTP server for %s: %s", location, error)
return None, error
else:
root = ET.fromstring(xml_str)
rootname = {'root': root.tag[root.tag.find('{')+1:root.tag.find('}')]}
baseip = root.find('root:URLBase', rootname).text
device = root.find('root:device', rootname)
serial = device.find('root:serialNumber', rootname).text
# anicon = device.find('root:iconList', rootname).find('root:icon', rootname)
# imgurl = anicon.find('root:url', rootname).text
# Alternatively, could look directly in the modelDescription field
if all(x in xml_str.lower() for x in ['philips', 'hue']):
return serial, _URLBase(baseip)
else:
return None, None | [
"def",
"parse_description_xml",
"(",
"location",
")",
":",
"class",
"_URLBase",
"(",
"str",
")",
":",
"\"\"\" Convenient access to hostname (ip) portion of the URL \"\"\"",
"@",
"property",
"def",
"hostname",
"(",
"self",
")",
":",
"return",
"urlsplit",
"(",
"self",
")",
".",
"hostname",
"# \"\"\"TODO: review error handling on xml\"\"\"",
"# may want to suppress ParseError in the event that it was caused",
"# by a none bridge device although this seems unlikely",
"try",
":",
"xml_str",
"=",
"from_url",
"(",
"location",
")",
"except",
"urllib",
".",
"request",
".",
"HTTPError",
"as",
"error",
":",
"logger",
".",
"info",
"(",
"\"No description for %s: %s\"",
",",
"location",
",",
"error",
")",
"return",
"None",
",",
"error",
"except",
"urllib",
".",
"request",
".",
"URLError",
"as",
"error",
":",
"logger",
".",
"info",
"(",
"\"No HTTP server for %s: %s\"",
",",
"location",
",",
"error",
")",
"return",
"None",
",",
"error",
"else",
":",
"root",
"=",
"ET",
".",
"fromstring",
"(",
"xml_str",
")",
"rootname",
"=",
"{",
"'root'",
":",
"root",
".",
"tag",
"[",
"root",
".",
"tag",
".",
"find",
"(",
"'{'",
")",
"+",
"1",
":",
"root",
".",
"tag",
".",
"find",
"(",
"'}'",
")",
"]",
"}",
"baseip",
"=",
"root",
".",
"find",
"(",
"'root:URLBase'",
",",
"rootname",
")",
".",
"text",
"device",
"=",
"root",
".",
"find",
"(",
"'root:device'",
",",
"rootname",
")",
"serial",
"=",
"device",
".",
"find",
"(",
"'root:serialNumber'",
",",
"rootname",
")",
".",
"text",
"# anicon = device.find('root:iconList', rootname).find('root:icon', rootname)",
"# imgurl = anicon.find('root:url', rootname).text",
"# Alternatively, could look directly in the modelDescription field",
"if",
"all",
"(",
"x",
"in",
"xml_str",
".",
"lower",
"(",
")",
"for",
"x",
"in",
"[",
"'philips'",
",",
"'hue'",
"]",
")",
":",
"return",
"serial",
",",
"_URLBase",
"(",
"baseip",
")",
"else",
":",
"return",
"None",
",",
"None"
]
| 40.897436 | 18.282051 |
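A usage sketch; the bridge address is hypothetical, and a non-Hue device yields (None, None) per the code above.

# Hypothetical bridge address, e.g. discovered via SSDP.
serial, baseurl = parse_description_xml("http://192.168.1.10/description.xml")
if serial is not None:
    print(serial, baseurl.hostname)  # hostname comes from the _URLBase helper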
def _potInt(x,y,z,a2,b2,c2,n):
"""Integral involed in the potential at (x,y,z)
integrates 1/A B^(n+1) where
A = sqrt((tau+a)(tau+b)(tau+c)) and B = (1-x^2/(tau+a)-y^2/(tau+b)-z^2/(tau+c))
from lambda to infty with respect to tau.
The lower limit lambda is given by lowerlim function.
"""
def integrand(tau):
return _FracInt(x, y, z, a2, b2, c2, tau, n+1)
return integrate.quad(integrand, lowerlim(x**2,y**2,z**2,a2,b2,c2), np.inf)[0] | [
"def",
"_potInt",
"(",
"x",
",",
"y",
",",
"z",
",",
"a2",
",",
"b2",
",",
"c2",
",",
"n",
")",
":",
"def",
"integrand",
"(",
"tau",
")",
":",
"return",
"_FracInt",
"(",
"x",
",",
"y",
",",
"z",
",",
"a2",
",",
"b2",
",",
"c2",
",",
"tau",
",",
"n",
"+",
"1",
")",
"return",
"integrate",
".",
"quad",
"(",
"integrand",
",",
"lowerlim",
"(",
"x",
"**",
"2",
",",
"y",
"**",
"2",
",",
"z",
"**",
"2",
",",
"a2",
",",
"b2",
",",
"c2",
")",
",",
"np",
".",
"inf",
")",
"[",
"0",
"]"
]
| 46.4 | 15.6 |
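Written out in the docstring's notation (where a, b, c stand for the squared axis parameters passed as a2, b2, c2, an assumption), the quantity computed above is

\int_{\lambda}^{\infty}
  \frac{1}{\sqrt{(\tau+a)(\tau+b)(\tau+c)}}
  \left(1 - \frac{x^{2}}{\tau+a} - \frac{y^{2}}{\tau+b} - \frac{z^{2}}{\tau+c}\right)^{n+1} d\tau

with the lower limit \lambda given by lowerlim(x^2, y^2, z^2, a, b, c).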
def make_srcmap_manifest(self, components, name_factory):
""" Build a yaml file that specfies how to make the srcmap files for a particular model
Parameters
----------
components : list
The binning components used in this analysis
name_factory : `NameFactory`
Object that handles naming conventions
Returns a dictionary that contains information about where to find the
source maps for each component of the model
"""
ret_dict = {}
for comp in components:
compkey = comp.make_key('{ebin_name}_{evtype_name}')
zcut = "zmax%i" % comp.zmax
name_keys = dict(modelkey=self.model_name,
zcut=zcut,
ebin=comp.ebin_name,
mktime='none',
psftype=comp.evtype_name,
coordsys=comp.coordsys)
outsrcmap = name_factory.merged_srcmaps(**name_keys)
ccube = name_factory.ccube(**name_keys)
src_dict = {}
for comp_name, model_comp in self.model_components.items():
comp_info = model_comp.info
model_type = comp_info.model_type
name_keys['sourcekey'] = comp_name
if model_type in ['CatalogSources']:
#sourcekey = comp_info.comp_key
sources = comp_info.source_names
name_keys['sourcekey'] = comp_info.catalog_info.catalog_name
elif model_type in ['CompositeSource']:
#sourcekey = comp_info.sourcekey
name_keys['sourcekey'] = comp_info.sourcekey
sources = [comp_info.source_name]
else:
#sourcekey = comp_name
sources = [comp_info.source_name]
src_dict[comp_name] = dict(sourcekey=comp_name,
srcmap_file=name_factory.srcmaps(**name_keys),
source_names=sources)
comp_dict = dict(outsrcmap=outsrcmap,
ccube=ccube,
source_dict=src_dict)
ret_dict[compkey] = comp_dict
return ret_dict | [
"def",
"make_srcmap_manifest",
"(",
"self",
",",
"components",
",",
"name_factory",
")",
":",
"ret_dict",
"=",
"{",
"}",
"for",
"comp",
"in",
"components",
":",
"compkey",
"=",
"comp",
".",
"make_key",
"(",
"'{ebin_name}_{evtype_name}'",
")",
"zcut",
"=",
"\"zmax%i\"",
"%",
"comp",
".",
"zmax",
"name_keys",
"=",
"dict",
"(",
"modelkey",
"=",
"self",
".",
"model_name",
",",
"zcut",
"=",
"zcut",
",",
"ebin",
"=",
"comp",
".",
"ebin_name",
",",
"mktime",
"=",
"'none'",
",",
"psftype",
"=",
"comp",
".",
"evtype_name",
",",
"coordsys",
"=",
"comp",
".",
"coordsys",
")",
"outsrcmap",
"=",
"name_factory",
".",
"merged_srcmaps",
"(",
"*",
"*",
"name_keys",
")",
"ccube",
"=",
"name_factory",
".",
"ccube",
"(",
"*",
"*",
"name_keys",
")",
"src_dict",
"=",
"{",
"}",
"for",
"comp_name",
",",
"model_comp",
"in",
"self",
".",
"model_components",
".",
"items",
"(",
")",
":",
"comp_info",
"=",
"model_comp",
".",
"info",
"model_type",
"=",
"comp_info",
".",
"model_type",
"name_keys",
"[",
"'sourcekey'",
"]",
"=",
"comp_name",
"if",
"model_type",
"in",
"[",
"'CatalogSources'",
"]",
":",
"#sourcekey = comp_info.comp_key",
"sources",
"=",
"comp_info",
".",
"source_names",
"name_keys",
"[",
"'sourcekey'",
"]",
"=",
"comp_info",
".",
"catalog_info",
".",
"catalog_name",
"elif",
"model_type",
"in",
"[",
"'CompositeSource'",
"]",
":",
"#sourcekey = comp_info.sourcekey",
"name_keys",
"[",
"'sourcekey'",
"]",
"=",
"comp_info",
".",
"sourcekey",
"sources",
"=",
"[",
"comp_info",
".",
"source_name",
"]",
"else",
":",
"#sourcekey = comp_name",
"sources",
"=",
"[",
"comp_info",
".",
"source_name",
"]",
"src_dict",
"[",
"comp_name",
"]",
"=",
"dict",
"(",
"sourcekey",
"=",
"comp_name",
",",
"srcmap_file",
"=",
"name_factory",
".",
"srcmaps",
"(",
"*",
"*",
"name_keys",
")",
",",
"source_names",
"=",
"sources",
")",
"comp_dict",
"=",
"dict",
"(",
"outsrcmap",
"=",
"outsrcmap",
",",
"ccube",
"=",
"ccube",
",",
"source_dict",
"=",
"src_dict",
")",
"ret_dict",
"[",
"compkey",
"]",
"=",
"comp_dict",
"return",
"ret_dict"
]
| 44.038462 | 16.75 |
def monitor(self, pk, parent_pk=None, timeout=None, interval=0.5, outfile=sys.stdout, **kwargs):
"""
        Stream the standard output from a job, project update, or inventory update.
=====API DOCS=====
Stream the standard output from a job run to stdout.
:param pk: Primary key of the job resource object to be monitored.
:type pk: int
:param parent_pk: Primary key of the unified job template resource object whose latest job run will be
monitored if ``pk`` is not set.
:type parent_pk: int
:param timeout: Number in seconds after which this method will time out.
:type timeout: float
:param interval: Polling interval to refresh content from Tower.
:type interval: float
:param outfile: Alternative file than stdout to write job stdout to.
:type outfile: file
:param `**kwargs`: Keyword arguments used to look up job resource object to monitor if ``pk`` is
not provided.
:returns: A dictionary combining the JSON output of the finished job resource object, as well as
two extra fields: "changed", a flag indicating if the job resource object is finished
as expected; "id", an integer which is the primary key of the job resource object being
monitored.
:rtype: dict
:raises tower_cli.exceptions.Timeout: When monitor time reaches time out.
:raises tower_cli.exceptions.JobFailure: When the job being monitored runs into failure.
=====API DOCS=====
"""
# If we do not have the unified job info, infer it from parent
if pk is None:
pk = self.last_job_data(parent_pk, **kwargs)['id']
job_endpoint = '%s%s/' % (self.unified_job_type, pk)
# Pause until job is in running state
self.wait(pk, exit_on=['running', 'successful'], outfile=outfile)
# Loop initialization
start = time.time()
start_line = 0
result = client.get(job_endpoint).json()
click.echo('\033[0;91m------Starting Standard Out Stream------\033[0m', nl=2, file=outfile)
# Poll the Ansible Tower instance for status and content, and print standard out to the out file
while not result['failed'] and result['status'] != 'successful':
result = client.get(job_endpoint).json()
# Put the process to sleep briefly.
time.sleep(interval)
# Make request to get standard out
content = self.lookup_stdout(pk, start_line, full=False)
# In the first moments of running the job, the standard out
# may not be available yet
if not content.startswith("Waiting for results"):
line_count = len(content.splitlines())
start_line += line_count
click.echo(content, nl=0, file=outfile)
if timeout and time.time() - start > timeout:
raise exc.Timeout('Monitoring aborted due to timeout.')
# Special final line for closure with workflow jobs
if self.endpoint == '/workflow_jobs/':
click.echo(self.lookup_stdout(pk, start_line, full=True), nl=1)
click.echo('\033[0;91m------End of Standard Out Stream--------\033[0m', nl=2, file=outfile)
if result['failed']:
raise exc.JobFailure('Job failed.')
# Return the job ID and other response data
answer = OrderedDict((('changed', True), ('id', pk)))
answer.update(result)
# Make sure to return ID of resource and not update number relevant for project creation and update
if parent_pk:
answer['id'] = parent_pk
else:
answer['id'] = pk
return answer | [
"def",
"monitor",
"(",
"self",
",",
"pk",
",",
"parent_pk",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"interval",
"=",
"0.5",
",",
"outfile",
"=",
"sys",
".",
"stdout",
",",
"*",
"*",
"kwargs",
")",
":",
"# If we do not have the unified job info, infer it from parent",
"if",
"pk",
"is",
"None",
":",
"pk",
"=",
"self",
".",
"last_job_data",
"(",
"parent_pk",
",",
"*",
"*",
"kwargs",
")",
"[",
"'id'",
"]",
"job_endpoint",
"=",
"'%s%s/'",
"%",
"(",
"self",
".",
"unified_job_type",
",",
"pk",
")",
"# Pause until job is in running state",
"self",
".",
"wait",
"(",
"pk",
",",
"exit_on",
"=",
"[",
"'running'",
",",
"'successful'",
"]",
",",
"outfile",
"=",
"outfile",
")",
"# Loop initialization",
"start",
"=",
"time",
".",
"time",
"(",
")",
"start_line",
"=",
"0",
"result",
"=",
"client",
".",
"get",
"(",
"job_endpoint",
")",
".",
"json",
"(",
")",
"click",
".",
"echo",
"(",
"'\\033[0;91m------Starting Standard Out Stream------\\033[0m'",
",",
"nl",
"=",
"2",
",",
"file",
"=",
"outfile",
")",
"# Poll the Ansible Tower instance for status and content, and print standard out to the out file",
"while",
"not",
"result",
"[",
"'failed'",
"]",
"and",
"result",
"[",
"'status'",
"]",
"!=",
"'successful'",
":",
"result",
"=",
"client",
".",
"get",
"(",
"job_endpoint",
")",
".",
"json",
"(",
")",
"# Put the process to sleep briefly.",
"time",
".",
"sleep",
"(",
"interval",
")",
"# Make request to get standard out",
"content",
"=",
"self",
".",
"lookup_stdout",
"(",
"pk",
",",
"start_line",
",",
"full",
"=",
"False",
")",
"# In the first moments of running the job, the standard out",
"# may not be available yet",
"if",
"not",
"content",
".",
"startswith",
"(",
"\"Waiting for results\"",
")",
":",
"line_count",
"=",
"len",
"(",
"content",
".",
"splitlines",
"(",
")",
")",
"start_line",
"+=",
"line_count",
"click",
".",
"echo",
"(",
"content",
",",
"nl",
"=",
"0",
",",
"file",
"=",
"outfile",
")",
"if",
"timeout",
"and",
"time",
".",
"time",
"(",
")",
"-",
"start",
">",
"timeout",
":",
"raise",
"exc",
".",
"Timeout",
"(",
"'Monitoring aborted due to timeout.'",
")",
"# Special final line for closure with workflow jobs",
"if",
"self",
".",
"endpoint",
"==",
"'/workflow_jobs/'",
":",
"click",
".",
"echo",
"(",
"self",
".",
"lookup_stdout",
"(",
"pk",
",",
"start_line",
",",
"full",
"=",
"True",
")",
",",
"nl",
"=",
"1",
")",
"click",
".",
"echo",
"(",
"'\\033[0;91m------End of Standard Out Stream--------\\033[0m'",
",",
"nl",
"=",
"2",
",",
"file",
"=",
"outfile",
")",
"if",
"result",
"[",
"'failed'",
"]",
":",
"raise",
"exc",
".",
"JobFailure",
"(",
"'Job failed.'",
")",
"# Return the job ID and other response data",
"answer",
"=",
"OrderedDict",
"(",
"(",
"(",
"'changed'",
",",
"True",
")",
",",
"(",
"'id'",
",",
"pk",
")",
")",
")",
"answer",
".",
"update",
"(",
"result",
")",
"# Make sure to return ID of resource and not update number relevant for project creation and update",
"if",
"parent_pk",
":",
"answer",
"[",
"'id'",
"]",
"=",
"parent_pk",
"else",
":",
"answer",
"[",
"'id'",
"]",
"=",
"pk",
"return",
"answer"
]
| 44.035294 | 27.776471 |
def run_task(self, task_name, task_args=[], task_kwargs={}):
"""
Run asynchronous task on a :class:`carotte.Worker`.
:param string task_name: Name of task to execute
:param list task_args: (optional) List of arguments to give to task
:param dict task_kwargs: (optional) Dict of keyword arguments
to give to task
:returns: :class:`carotte.Task` object
"""
data = {
'action': 'run_task',
'name': task_name,
'args': task_args,
'kwargs': task_kwargs}
self.__send_pyobj(data)
task = self.__recv_pyobj()
task.client = self
return task | [
"def",
"run_task",
"(",
"self",
",",
"task_name",
",",
"task_args",
"=",
"[",
"]",
",",
"task_kwargs",
"=",
"{",
"}",
")",
":",
"data",
"=",
"{",
"'action'",
":",
"'run_task'",
",",
"'name'",
":",
"task_name",
",",
"'args'",
":",
"task_args",
",",
"'kwargs'",
":",
"task_kwargs",
"}",
"self",
".",
"__send_pyobj",
"(",
"data",
")",
"task",
"=",
"self",
".",
"__recv_pyobj",
"(",
")",
"task",
".",
"client",
"=",
"self",
"return",
"task"
]
| 32.761905 | 17.142857 |
def emit(self, **kwargs):
"""Emit signal by calling all connected slots.
The arguments supplied have to match the signal definition.
Args:
kwargs: Keyword arguments to be passed to connected slots.
Raises:
:exc:`InvalidEmit`: If arguments don't match signal specification.
"""
self._ensure_emit_kwargs(kwargs)
for slot in self.slots:
slot(**kwargs) | [
"def",
"emit",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_ensure_emit_kwargs",
"(",
"kwargs",
")",
"for",
"slot",
"in",
"self",
".",
"slots",
":",
"slot",
"(",
"*",
"*",
"kwargs",
")"
]
| 30.714286 | 21.785714 |
def add_component(self, alias: str, type: Union[str, type] = None, **config):
"""
Add a child component.
This will instantiate a component class, as specified by the ``type`` argument.
If the second argument is omitted, the value of ``alias`` is used as its value.
The locally given configuration can be overridden by component configuration parameters
supplied to the constructor (via the ``components`` argument).
When configuration values are provided both as keyword arguments to this method and
component configuration through the ``components`` constructor argument, the configurations
are merged together using :func:`~asphalt.core.util.merge_config` in a way that the
configuration values from the ``components`` argument override the keyword arguments to
this method.
:param alias: a name for the component instance, unique within this container
:param type: entry point name or :class:`Component` subclass or a ``module:varname``
reference to one
:param config: keyword arguments passed to the component's constructor
"""
assert check_argument_types()
if not isinstance(alias, str) or not alias:
raise TypeError('component_alias must be a nonempty string')
if alias in self.child_components:
raise ValueError('there is already a child component named "{}"'.format(alias))
config['type'] = type or alias
# Allow the external configuration to override the constructor arguments
override_config = self.component_configs.get(alias) or {}
config = merge_config(config, override_config)
component = component_types.create_object(**config)
self.child_components[alias] = component | [
"def",
"add_component",
"(",
"self",
",",
"alias",
":",
"str",
",",
"type",
":",
"Union",
"[",
"str",
",",
"type",
"]",
"=",
"None",
",",
"*",
"*",
"config",
")",
":",
"assert",
"check_argument_types",
"(",
")",
"if",
"not",
"isinstance",
"(",
"alias",
",",
"str",
")",
"or",
"not",
"alias",
":",
"raise",
"TypeError",
"(",
"'component_alias must be a nonempty string'",
")",
"if",
"alias",
"in",
"self",
".",
"child_components",
":",
"raise",
"ValueError",
"(",
"'there is already a child component named \"{}\"'",
".",
"format",
"(",
"alias",
")",
")",
"config",
"[",
"'type'",
"]",
"=",
"type",
"or",
"alias",
"# Allow the external configuration to override the constructor arguments",
"override_config",
"=",
"self",
".",
"component_configs",
".",
"get",
"(",
"alias",
")",
"or",
"{",
"}",
"config",
"=",
"merge_config",
"(",
"config",
",",
"override_config",
")",
"component",
"=",
"component_types",
".",
"create_object",
"(",
"*",
"*",
"config",
")",
"self",
".",
"child_components",
"[",
"alias",
"]",
"=",
"component"
]
| 48.216216 | 31.783784 |
def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'):
r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
group of columns with format
A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...
You specify what you want to call this suffix in the resulting long format
with `j` (for example `j='year'`)
Each row of these wide variables are assumed to be uniquely identified by
`i` (can be a single column name or a list of column names)
All remaining variables in the data frame are left intact.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : str or list-like
The stub name(s). The wide format variables are assumed to
start with the stub names.
i : str or list-like
Column(s) to use as id variable(s)
j : str
The name of the sub-observation variable. What you wish to name your
suffix in the long format.
sep : str, default ""
A character indicating the separation of the variable names
in the wide format, to be stripped from the names in the long format.
For example, if your column names are A-suffix1, A-suffix2, you
can strip the hyphen by specifying `sep='-'`
.. versionadded:: 0.20.0
suffix : str, default '\\d+'
A regular expression capturing the wanted suffixes. '\\d+' captures
numeric suffixes. Suffixes with no numbers could be specified with the
negated character class '\\D+'. You can also further disambiguate
suffixes, for example, if your wide variables are of the form
A-one, B-two,.., and you have an unrelated column A-rating, you can
ignore the last one by specifying `suffix='(!?one|two)'`
.. versionadded:: 0.20.0
.. versionchanged:: 0.23.0
When all suffixes are numeric, they are cast to int64/float64.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable, with new index
(i, j).
Notes
-----
All extra variables are left untouched. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
in a typical case.
Examples
--------
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")
... # doctest: +NORMALIZE_WHITESPACE
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
With multiple id columns
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
birth famid ht1 ht2
0 1 1 2.8 3.4
1 2 1 2.9 3.8
2 3 1 2.2 2.9
3 1 2 2.0 3.2
4 2 2 1.8 2.8
5 3 2 1.9 2.4
6 1 3 2.2 3.3
7 2 3 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 1 2.8
2 3.4
2 1 2.9
2 3.8
3 1 2.2
2 2.9
2 1 1 2.0
2 3.2
2 1 1.8
2 2.8
3 1 1.9
2 2.4
3 1 1 2.2
2 3.3
2 1 2.3
2 3.4
3 1 2.1
2 2.9
Going from long back to wide just takes some creative use of `unstack`
>>> w = l.unstack()
>>> w.columns = w.columns.map('{0[0]}{0[1]}'.format)
>>> w.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
Less wieldy column names are also handled
>>> np.random.seed(0)
>>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3),
... 'A(quarterly)-2011': np.random.rand(3),
... 'B(quarterly)-2010': np.random.rand(3),
... 'B(quarterly)-2011': np.random.rand(3),
... 'X' : np.random.randint(3, size=3)})
>>> df['id'] = df.index
>>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ...
0 0.548814 0.544883 0.437587 ...
1 0.715189 0.423655 0.891773 ...
2 0.602763 0.645894 0.963663 ...
X id
0 0 0
1 1 1
2 1 2
>>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id',
... j='year', sep='-')
... # doctest: +NORMALIZE_WHITESPACE
X A(quarterly) B(quarterly)
id year
0 2010 0 0.548814 0.437587
1 2010 1 0.715189 0.891773
2 2010 1 0.602763 0.963663
0 2011 0 0.544883 0.383442
1 2011 1 0.423655 0.791725
2 2011 1 0.645894 0.528895
If we have many columns, we could also use a regex to find our
stubnames and pass that list on to wide_to_long
>>> stubnames = sorted(
... set([match[0] for match in df.columns.str.findall(
... r'[A-B]\(.*\)').values if match != [] ])
... )
>>> list(stubnames)
['A(quarterly)', 'B(quarterly)']
All of the above examples have integers as suffixes. It is possible to
have non-integers as suffixes.
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
birth famid ht_one ht_two
0 1 1 2.8 3.4
1 2 1 2.9 3.8
2 3 1 2.2 2.9
3 1 2 2.0 3.2
4 2 2 1.8 2.8
5 3 2 1.9 2.4
6 1 3 2.2 3.3
7 2 3 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',
    ...                        sep='_', suffix='\w')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 one 2.8
two 3.4
2 one 2.9
two 3.8
3 one 2.2
two 2.9
2 1 one 2.0
two 3.2
2 one 1.8
two 2.8
3 one 1.9
two 2.4
3 1 one 2.2
two 3.3
2 one 2.3
two 3.4
3 one 2.1
two 2.9
"""
def get_var_names(df, stub, sep, suffix):
regex = r'^{stub}{sep}{suffix}$'.format(
stub=re.escape(stub), sep=re.escape(sep), suffix=suffix)
pattern = re.compile(regex)
return [col for col in df.columns if pattern.match(col)]
def melt_stub(df, stub, i, j, value_vars, sep):
newdf = melt(df, id_vars=i, value_vars=value_vars,
value_name=stub.rstrip(sep), var_name=j)
newdf[j] = Categorical(newdf[j])
newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "")
# GH17627 Cast numerics suffixes to int/float
newdf[j] = to_numeric(newdf[j], errors='ignore')
return newdf.set_index(i + [j])
if not is_list_like(stubnames):
stubnames = [stubnames]
else:
stubnames = list(stubnames)
if any(col in stubnames for col in df.columns):
raise ValueError("stubname can't be identical to a column name")
if not is_list_like(i):
i = [i]
else:
i = list(i)
if df[i].duplicated().any():
raise ValueError("the id variables need to uniquely identify each row")
value_vars = [get_var_names(df, stub, sep, suffix) for stub in stubnames]
value_vars_flattened = [e for sublist in value_vars for e in sublist]
id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened))
melted = [melt_stub(df, s, i, j, v, sep)
for s, v in zip(stubnames, value_vars)]
melted = melted[0].join(melted[1:], how='outer')
if len(i) == 1:
new = df[id_vars].set_index(i).join(melted)
return new
new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j])
return new | [
"def",
"wide_to_long",
"(",
"df",
",",
"stubnames",
",",
"i",
",",
"j",
",",
"sep",
"=",
"\"\"",
",",
"suffix",
"=",
"r'\\d+'",
")",
":",
"def",
"get_var_names",
"(",
"df",
",",
"stub",
",",
"sep",
",",
"suffix",
")",
":",
"regex",
"=",
"r'^{stub}{sep}{suffix}$'",
".",
"format",
"(",
"stub",
"=",
"re",
".",
"escape",
"(",
"stub",
")",
",",
"sep",
"=",
"re",
".",
"escape",
"(",
"sep",
")",
",",
"suffix",
"=",
"suffix",
")",
"pattern",
"=",
"re",
".",
"compile",
"(",
"regex",
")",
"return",
"[",
"col",
"for",
"col",
"in",
"df",
".",
"columns",
"if",
"pattern",
".",
"match",
"(",
"col",
")",
"]",
"def",
"melt_stub",
"(",
"df",
",",
"stub",
",",
"i",
",",
"j",
",",
"value_vars",
",",
"sep",
")",
":",
"newdf",
"=",
"melt",
"(",
"df",
",",
"id_vars",
"=",
"i",
",",
"value_vars",
"=",
"value_vars",
",",
"value_name",
"=",
"stub",
".",
"rstrip",
"(",
"sep",
")",
",",
"var_name",
"=",
"j",
")",
"newdf",
"[",
"j",
"]",
"=",
"Categorical",
"(",
"newdf",
"[",
"j",
"]",
")",
"newdf",
"[",
"j",
"]",
"=",
"newdf",
"[",
"j",
"]",
".",
"str",
".",
"replace",
"(",
"re",
".",
"escape",
"(",
"stub",
"+",
"sep",
")",
",",
"\"\"",
")",
"# GH17627 Cast numerics suffixes to int/float",
"newdf",
"[",
"j",
"]",
"=",
"to_numeric",
"(",
"newdf",
"[",
"j",
"]",
",",
"errors",
"=",
"'ignore'",
")",
"return",
"newdf",
".",
"set_index",
"(",
"i",
"+",
"[",
"j",
"]",
")",
"if",
"not",
"is_list_like",
"(",
"stubnames",
")",
":",
"stubnames",
"=",
"[",
"stubnames",
"]",
"else",
":",
"stubnames",
"=",
"list",
"(",
"stubnames",
")",
"if",
"any",
"(",
"col",
"in",
"stubnames",
"for",
"col",
"in",
"df",
".",
"columns",
")",
":",
"raise",
"ValueError",
"(",
"\"stubname can't be identical to a column name\"",
")",
"if",
"not",
"is_list_like",
"(",
"i",
")",
":",
"i",
"=",
"[",
"i",
"]",
"else",
":",
"i",
"=",
"list",
"(",
"i",
")",
"if",
"df",
"[",
"i",
"]",
".",
"duplicated",
"(",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"the id variables need to uniquely identify each row\"",
")",
"value_vars",
"=",
"[",
"get_var_names",
"(",
"df",
",",
"stub",
",",
"sep",
",",
"suffix",
")",
"for",
"stub",
"in",
"stubnames",
"]",
"value_vars_flattened",
"=",
"[",
"e",
"for",
"sublist",
"in",
"value_vars",
"for",
"e",
"in",
"sublist",
"]",
"id_vars",
"=",
"list",
"(",
"set",
"(",
"df",
".",
"columns",
".",
"tolist",
"(",
")",
")",
".",
"difference",
"(",
"value_vars_flattened",
")",
")",
"melted",
"=",
"[",
"melt_stub",
"(",
"df",
",",
"s",
",",
"i",
",",
"j",
",",
"v",
",",
"sep",
")",
"for",
"s",
",",
"v",
"in",
"zip",
"(",
"stubnames",
",",
"value_vars",
")",
"]",
"melted",
"=",
"melted",
"[",
"0",
"]",
".",
"join",
"(",
"melted",
"[",
"1",
":",
"]",
",",
"how",
"=",
"'outer'",
")",
"if",
"len",
"(",
"i",
")",
"==",
"1",
":",
"new",
"=",
"df",
"[",
"id_vars",
"]",
".",
"set_index",
"(",
"i",
")",
".",
"join",
"(",
"melted",
")",
"return",
"new",
"new",
"=",
"df",
"[",
"id_vars",
"]",
".",
"merge",
"(",
"melted",
".",
"reset_index",
"(",
")",
",",
"on",
"=",
"i",
")",
".",
"set_index",
"(",
"i",
"+",
"[",
"j",
"]",
")",
"return",
"new"
]
| 33.701068 | 20.259786 |
def set_memcached_backend(self, config):
"""
Select the most suitable Memcached backend based on the config and
on what's installed
"""
# This is the preferred backend as it is the fastest and most fully
# featured, so we use this by default
config['BACKEND'] = 'django_pylibmc.memcached.PyLibMCCache'
if is_importable(config['BACKEND']):
return
# Otherwise, binary connections can use this pure Python implementation
if config.get('BINARY') and is_importable('django_bmemcached'):
config['BACKEND'] = 'django_bmemcached.memcached.BMemcached'
return
# For text-based connections without any authentication we can fall
# back to Django's core backends if the supporting libraries are
# installed
if not any([config.get(key) for key in ('BINARY', 'USERNAME', 'PASSWORD')]):
if is_importable('pylibmc'):
config['BACKEND'] = \
'django.core.cache.backends.memcached.PyLibMCCache'
elif is_importable('memcached'):
config['BACKEND'] = \
'django.core.cache.backends.memcached.MemcachedCache' | [
"def",
"set_memcached_backend",
"(",
"self",
",",
"config",
")",
":",
"# This is the preferred backend as it is the fastest and most fully",
"# featured, so we use this by default",
"config",
"[",
"'BACKEND'",
"]",
"=",
"'django_pylibmc.memcached.PyLibMCCache'",
"if",
"is_importable",
"(",
"config",
"[",
"'BACKEND'",
"]",
")",
":",
"return",
"# Otherwise, binary connections can use this pure Python implementation",
"if",
"config",
".",
"get",
"(",
"'BINARY'",
")",
"and",
"is_importable",
"(",
"'django_bmemcached'",
")",
":",
"config",
"[",
"'BACKEND'",
"]",
"=",
"'django_bmemcached.memcached.BMemcached'",
"return",
"# For text-based connections without any authentication we can fall",
"# back to Django's core backends if the supporting libraries are",
"# installed",
"if",
"not",
"any",
"(",
"[",
"config",
".",
"get",
"(",
"key",
")",
"for",
"key",
"in",
"(",
"'BINARY'",
",",
"'USERNAME'",
",",
"'PASSWORD'",
")",
"]",
")",
":",
"if",
"is_importable",
"(",
"'pylibmc'",
")",
":",
"config",
"[",
"'BACKEND'",
"]",
"=",
"'django.core.cache.backends.memcached.PyLibMCCache'",
"elif",
"is_importable",
"(",
"'memcached'",
")",
":",
"config",
"[",
"'BACKEND'",
"]",
"=",
"'django.core.cache.backends.memcached.MemcachedCache'"
]
| 50.5 | 19.916667 |
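A minimal sketch of how the fallback chain in set_memcached_backend could play out. The is_importable helper below is an assumption (the real one is not shown in this row) and only resolves the top-level module of the dotted path; the config dict stands in for a Django cache configuration with a text-based, unauthenticated connection.

import importlib.util

def is_importable(dotted_path):
    # Check only the top-level module of the dotted path; enough to mimic the probe.
    top_level = dotted_path.split('.')[0]
    return importlib.util.find_spec(top_level) is not None

# Hypothetical config: text protocol, no credentials.
config = {'BINARY': False, 'USERNAME': None, 'PASSWORD': None}

# Preferred backend first, then the same core-Django fallbacks as above.
if is_importable('django_pylibmc.memcached.PyLibMCCache'):
    config['BACKEND'] = 'django_pylibmc.memcached.PyLibMCCache'
elif is_importable('pylibmc'):
    config['BACKEND'] = 'django.core.cache.backends.memcached.PyLibMCCache'
elif is_importable('memcached'):
    config['BACKEND'] = 'django.core.cache.backends.memcached.MemcachedCache'
print(config.get('BACKEND', 'no suitable backend found'))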
def extra_args_from_config(cp, section, skip_args=None, dtypes=None):
"""Gets any additional keyword in the given config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
section : str
The name of the section to read.
skip_args : list of str, optional
Names of arguments to skip.
dtypes : dict, optional
A dictionary of arguments -> data types. If an argument is found
in the dict, it will be cast to the given datatype. Otherwise, the
argument's value will just be read from the config file (and thus
be a string).
Returns
-------
dict
Dictionary of keyword arguments read from the config file.
"""
kwargs = {}
if dtypes is None:
dtypes = {}
if skip_args is None:
skip_args = []
read_args = [opt for opt in cp.options(section)
if opt not in skip_args]
for opt in read_args:
val = cp.get(section, opt)
# try to cast the value if a datatype was specified for this opt
try:
val = dtypes[opt](val)
except KeyError:
pass
kwargs[opt] = val
return kwargs | [
"def",
"extra_args_from_config",
"(",
"cp",
",",
"section",
",",
"skip_args",
"=",
"None",
",",
"dtypes",
"=",
"None",
")",
":",
"kwargs",
"=",
"{",
"}",
"if",
"dtypes",
"is",
"None",
":",
"dtypes",
"=",
"{",
"}",
"if",
"skip_args",
"is",
"None",
":",
"skip_args",
"=",
"[",
"]",
"read_args",
"=",
"[",
"opt",
"for",
"opt",
"in",
"cp",
".",
"options",
"(",
"section",
")",
"if",
"opt",
"not",
"in",
"skip_args",
"]",
"for",
"opt",
"in",
"read_args",
":",
"val",
"=",
"cp",
".",
"get",
"(",
"section",
",",
"opt",
")",
"# try to cast the value if a datatype was specified for this opt",
"try",
":",
"val",
"=",
"dtypes",
"[",
"opt",
"]",
"(",
"val",
")",
"except",
"KeyError",
":",
"pass",
"kwargs",
"[",
"opt",
"]",
"=",
"val",
"return",
"kwargs"
]
| 34.578947 | 17.447368 |
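A small usage sketch for extra_args_from_config, assuming the function is importable directly and that a plain configparser.ConfigParser can stand in for the WorkflowConfigParser named in the docstring (both expose the options()/get() calls the function relies on). Section and option names are made up for illustration.

import configparser

cp = configparser.ConfigParser()
cp.read_string("""
[model]
name = gaussian
nsamples = 1000
verbose = True
""")

# Cast nsamples to int, skip the 'name' option, leave everything else as strings.
kwargs = extra_args_from_config(cp, 'model',
                                skip_args=['name'],
                                dtypes={'nsamples': int})
print(kwargs)  # expected: {'nsamples': 1000, 'verbose': 'True'}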
def make_job(job_name, **kwargs):
"""
Decorator to create a Job from a function.
Give a job name and add extra fields to the job.
@make_job("ExecuteDecJob",
command=mongoengine.StringField(required=True),
output=mongoengine.StringField(default=None))
def execute(job: Job):
job.log_info('ExecuteJob %s - Executing command...' % job.uuid)
result = subprocess.run(job.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
job.output = result.stdout.decode('utf-8') + " " + result.stderr.decode('utf-8')
"""
def wraps(func):
kwargs['process'] = func
job = type(job_name, (Job,), kwargs)
globals()[job_name] = job
return job
return wraps | [
"def",
"make_job",
"(",
"job_name",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"wraps",
"(",
"func",
")",
":",
"kwargs",
"[",
"'process'",
"]",
"=",
"func",
"job",
"=",
"type",
"(",
"job_name",
",",
"(",
"Job",
",",
")",
",",
"kwargs",
")",
"globals",
"(",
")",
"[",
"job_name",
"]",
"=",
"job",
"return",
"job",
"return",
"wraps"
]
| 38.15 | 19.85 |
def _ProcessFileSource(self, source):
"""Glob paths and return StatEntry objects."""
if source.path_type != rdf_paths.PathSpec.PathType.OS:
raise ValueError("Only supported path type is OS.")
paths = artifact_utils.InterpolateListKbAttributes(
source.base_source.attributes["paths"], self.knowledge_base,
self.ignore_interpolation_errors)
file_finder_action = rdf_file_finder.FileFinderAction.Stat()
request = rdf_file_finder.FileFinderArgs(
paths=paths, pathtype=source.path_type, action=file_finder_action)
action = file_finder.FileFinderOSFromClient
yield action, request | [
"def",
"_ProcessFileSource",
"(",
"self",
",",
"source",
")",
":",
"if",
"source",
".",
"path_type",
"!=",
"rdf_paths",
".",
"PathSpec",
".",
"PathType",
".",
"OS",
":",
"raise",
"ValueError",
"(",
"\"Only supported path type is OS.\"",
")",
"paths",
"=",
"artifact_utils",
".",
"InterpolateListKbAttributes",
"(",
"source",
".",
"base_source",
".",
"attributes",
"[",
"\"paths\"",
"]",
",",
"self",
".",
"knowledge_base",
",",
"self",
".",
"ignore_interpolation_errors",
")",
"file_finder_action",
"=",
"rdf_file_finder",
".",
"FileFinderAction",
".",
"Stat",
"(",
")",
"request",
"=",
"rdf_file_finder",
".",
"FileFinderArgs",
"(",
"paths",
"=",
"paths",
",",
"pathtype",
"=",
"source",
".",
"path_type",
",",
"action",
"=",
"file_finder_action",
")",
"action",
"=",
"file_finder",
".",
"FileFinderOSFromClient",
"yield",
"action",
",",
"request"
]
| 38.8125 | 20.4375 |
def delete_set(self, x):
"""Removes the equivalence class containing `x`."""
if x not in self._parents:
return
members = list(self.members(x))
for v in members:
del self._parents[v]
del self._weights[v]
del self._prev_next[v]
del self._min_values[v] | [
"def",
"delete_set",
"(",
"self",
",",
"x",
")",
":",
"if",
"x",
"not",
"in",
"self",
".",
"_parents",
":",
"return",
"members",
"=",
"list",
"(",
"self",
".",
"members",
"(",
"x",
")",
")",
"for",
"v",
"in",
"members",
":",
"del",
"self",
".",
"_parents",
"[",
"v",
"]",
"del",
"self",
".",
"_weights",
"[",
"v",
"]",
"del",
"self",
".",
"_prev_next",
"[",
"v",
"]",
"del",
"self",
".",
"_min_values",
"[",
"v",
"]"
]
| 33.2 | 8.7 |
def append_partition_by_name(self, db_name, tbl_name, part_name):
"""
Parameters:
- db_name
- tbl_name
- part_name
"""
self.send_append_partition_by_name(db_name, tbl_name, part_name)
return self.recv_append_partition_by_name() | [
"def",
"append_partition_by_name",
"(",
"self",
",",
"db_name",
",",
"tbl_name",
",",
"part_name",
")",
":",
"self",
".",
"send_append_partition_by_name",
"(",
"db_name",
",",
"tbl_name",
",",
"part_name",
")",
"return",
"self",
".",
"recv_append_partition_by_name",
"(",
")"
]
| 28.222222 | 17.777778 |
def log(self, message, level=None):
""" Write a message to log """
if level is None:
level = logging.INFO
current_app.logger.log(msg=message, level=level) | [
"def",
"log",
"(",
"self",
",",
"message",
",",
"level",
"=",
"None",
")",
":",
"if",
"level",
"is",
"None",
":",
"level",
"=",
"logging",
".",
"INFO",
"current_app",
".",
"logger",
".",
"log",
"(",
"msg",
"=",
"message",
",",
"level",
"=",
"level",
")"
]
| 31 | 14 |
def start_blocking(self):
""" Start the advertiser in the background, but wait until it is ready """
self._cav_started.clear()
self.start()
self._cav_started.wait() | [
"def",
"start_blocking",
"(",
"self",
")",
":",
"self",
".",
"_cav_started",
".",
"clear",
"(",
")",
"self",
".",
"start",
"(",
")",
"self",
".",
"_cav_started",
".",
"wait",
"(",
")"
]
| 32 | 15 |
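The start/wait handshake in start_blocking follows the usual threading.Event pattern: the background thread sets the event once it is ready, and the caller blocks on it. A self-contained sketch of that pattern, with illustrative names not taken from the advertiser class:

import threading
import time

class Worker(threading.Thread):
    def __init__(self):
        super().__init__(daemon=True)
        self._started_evt = threading.Event()

    def run(self):
        time.sleep(0.1)          # pretend to do some setup work
        self._started_evt.set()  # signal readiness, like _cav_started above
        # ... main loop would go here ...

    def start_blocking(self):
        self._started_evt.clear()
        self.start()
        self._started_evt.wait()  # block until run() signals readiness

w = Worker()
w.start_blocking()
print("worker is ready")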
def finalize_structure(self):
"""Any functions needed to cleanup the structure."""
self.group_list.append(self.current_group)
group_set = get_unique_groups(self.group_list)
for item in self.group_list:
self.group_type_list.append(group_set.index(item))
self.group_list = [x.convert_to_dict() for x in group_set] | [
"def",
"finalize_structure",
"(",
"self",
")",
":",
"self",
".",
"group_list",
".",
"append",
"(",
"self",
".",
"current_group",
")",
"group_set",
"=",
"get_unique_groups",
"(",
"self",
".",
"group_list",
")",
"for",
"item",
"in",
"self",
".",
"group_list",
":",
"self",
".",
"group_type_list",
".",
"append",
"(",
"group_set",
".",
"index",
"(",
"item",
")",
")",
"self",
".",
"group_list",
"=",
"[",
"x",
".",
"convert_to_dict",
"(",
")",
"for",
"x",
"in",
"group_set",
"]"
]
| 51 | 12.428571 |
def users_register(self, email, name, password, username, **kwargs):
"""Register a new user."""
return self.__call_api_post('users.register', email=email, name=name, password=password, username=username,
kwargs=kwargs) | [
"def",
"users_register",
"(",
"self",
",",
"email",
",",
"name",
",",
"password",
",",
"username",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"__call_api_post",
"(",
"'users.register'",
",",
"email",
"=",
"email",
",",
"name",
"=",
"name",
",",
"password",
"=",
"password",
",",
"username",
"=",
"username",
",",
"kwargs",
"=",
"kwargs",
")"
]
| 66.75 | 28.25 |
def copy(self, name=None):
"""
shallow copy of the instruction.
Args:
name (str): name to be given to the copied circuit,
if None then the name stays the same
Returns:
Instruction: a shallow copy of the current instruction, with the name
updated if it was provided
"""
cpy = copy.copy(self)
if name:
cpy.name = name
return cpy | [
"def",
"copy",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"cpy",
"=",
"copy",
".",
"copy",
"(",
"self",
")",
"if",
"name",
":",
"cpy",
".",
"name",
"=",
"name",
"return",
"cpy"
]
| 27.0625 | 17.8125 |
def list_view_changed(self, widget, event, data=None):
"""
        Scroll the view so that the last rows are visible.
"""
adj = self.scrolled_window.get_vadjustment()
adj.set_value(adj.get_upper() - adj.get_page_size()) | [
"def",
"list_view_changed",
"(",
"self",
",",
"widget",
",",
"event",
",",
"data",
"=",
"None",
")",
":",
"adj",
"=",
"self",
".",
"scrolled_window",
".",
"get_vadjustment",
"(",
")",
"adj",
".",
"set_value",
"(",
"adj",
".",
"get_upper",
"(",
")",
"-",
"adj",
".",
"get_page_size",
"(",
")",
")"
]
| 36.833333 | 8.833333 |
def registration_packet(self):
"""Serialize this into a tuple suitable for returning from an RPC.
Returns:
tuple: The serialized values.
"""
return (self.hw_type, self.api_info[0], self.api_info[1], self.name, self.fw_info[0], self.fw_info[1], self.fw_info[2],
                self.exec_info[0], self.exec_info[1], self.exec_info[2], self.slot, self.unique_id)
"def",
"registration_packet",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"hw_type",
",",
"self",
".",
"api_info",
"[",
"0",
"]",
",",
"self",
".",
"api_info",
"[",
"1",
"]",
",",
"self",
".",
"name",
",",
"self",
".",
"fw_info",
"[",
"0",
"]",
",",
"self",
".",
"fw_info",
"[",
"1",
"]",
",",
"self",
".",
"fw_info",
"[",
"2",
"]",
",",
"self",
".",
"exec_info",
"[",
"0",
"]",
",",
"self",
".",
"exec_info",
"[",
"0",
"]",
",",
"self",
".",
"exec_info",
"[",
"0",
"]",
",",
"self",
".",
"slot",
",",
"self",
".",
"unique_id",
")"
]
| 44.222222 | 29 |
def _mkpart(root, fs_format, fs_opts, mount_dir):
'''
Make a partition, and make it bootable
.. versionadded:: Beryllium
'''
__salt__['partition.mklabel'](root, 'msdos')
loop1 = __salt__['cmd.run']('losetup -f')
log.debug('First loop device is %s', loop1)
__salt__['cmd.run']('losetup {0} {1}'.format(loop1, root))
part_info = __salt__['partition.list'](loop1)
start = six.text_type(2048 * 2048) + 'B'
end = part_info['info']['size']
__salt__['partition.mkpart'](loop1, 'primary', start=start, end=end)
__salt__['partition.set'](loop1, '1', 'boot', 'on')
part_info = __salt__['partition.list'](loop1)
loop2 = __salt__['cmd.run']('losetup -f')
log.debug('Second loop device is %s', loop2)
start = start.rstrip('B')
__salt__['cmd.run']('losetup -o {0} {1} {2}'.format(start, loop2, loop1))
_mkfs(loop2, fs_format, fs_opts)
__salt__['mount.mount'](mount_dir, loop2)
__salt__['cmd.run']((
'grub-install',
'--target=i386-pc',
'--debug',
'--no-floppy',
'--modules=part_msdos linux',
'--boot-directory={0}/boot'.format(mount_dir),
loop1
), python_shell=False)
__salt__['mount.umount'](mount_dir)
__salt__['cmd.run']('losetup -d {0}'.format(loop2))
__salt__['cmd.run']('losetup -d {0}'.format(loop1))
return part_info | [
"def",
"_mkpart",
"(",
"root",
",",
"fs_format",
",",
"fs_opts",
",",
"mount_dir",
")",
":",
"__salt__",
"[",
"'partition.mklabel'",
"]",
"(",
"root",
",",
"'msdos'",
")",
"loop1",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'losetup -f'",
")",
"log",
".",
"debug",
"(",
"'First loop device is %s'",
",",
"loop1",
")",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'losetup {0} {1}'",
".",
"format",
"(",
"loop1",
",",
"root",
")",
")",
"part_info",
"=",
"__salt__",
"[",
"'partition.list'",
"]",
"(",
"loop1",
")",
"start",
"=",
"six",
".",
"text_type",
"(",
"2048",
"*",
"2048",
")",
"+",
"'B'",
"end",
"=",
"part_info",
"[",
"'info'",
"]",
"[",
"'size'",
"]",
"__salt__",
"[",
"'partition.mkpart'",
"]",
"(",
"loop1",
",",
"'primary'",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
")",
"__salt__",
"[",
"'partition.set'",
"]",
"(",
"loop1",
",",
"'1'",
",",
"'boot'",
",",
"'on'",
")",
"part_info",
"=",
"__salt__",
"[",
"'partition.list'",
"]",
"(",
"loop1",
")",
"loop2",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'losetup -f'",
")",
"log",
".",
"debug",
"(",
"'Second loop device is %s'",
",",
"loop2",
")",
"start",
"=",
"start",
".",
"rstrip",
"(",
"'B'",
")",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'losetup -o {0} {1} {2}'",
".",
"format",
"(",
"start",
",",
"loop2",
",",
"loop1",
")",
")",
"_mkfs",
"(",
"loop2",
",",
"fs_format",
",",
"fs_opts",
")",
"__salt__",
"[",
"'mount.mount'",
"]",
"(",
"mount_dir",
",",
"loop2",
")",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"(",
"'grub-install'",
",",
"'--target=i386-pc'",
",",
"'--debug'",
",",
"'--no-floppy'",
",",
"'--modules=part_msdos linux'",
",",
"'--boot-directory={0}/boot'",
".",
"format",
"(",
"mount_dir",
")",
",",
"loop1",
")",
",",
"python_shell",
"=",
"False",
")",
"__salt__",
"[",
"'mount.umount'",
"]",
"(",
"mount_dir",
")",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'losetup -d {0}'",
".",
"format",
"(",
"loop2",
")",
")",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'losetup -d {0}'",
".",
"format",
"(",
"loop1",
")",
")",
"return",
"part_info"
]
| 38.171429 | 14.457143 |
def required_length(nmin, nmax):
"""
For use with argparse's action argument. Allows setting a range for nargs.
Example: nargs='+', action=required_length(2, 3)
:param int nmin: Minimum number of arguments
:param int nmax: Maximum number of arguments
:return: RequiredLength object
"""
class RequiredLength(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if not nmin <= len(values) <= nmax:
msg = 'argument "{f}" requires between {nmin} and {nmax} arguments'.format(
f=self.dest, nmin=nmin, nmax=nmax)
raise argparse.ArgumentTypeError(msg)
setattr(args, self.dest, values)
return RequiredLength | [
"def",
"required_length",
"(",
"nmin",
",",
"nmax",
")",
":",
"class",
"RequiredLength",
"(",
"argparse",
".",
"Action",
")",
":",
"def",
"__call__",
"(",
"self",
",",
"parser",
",",
"args",
",",
"values",
",",
"option_string",
"=",
"None",
")",
":",
"if",
"not",
"nmin",
"<=",
"len",
"(",
"values",
")",
"<=",
"nmax",
":",
"msg",
"=",
"'argument \"{f}\" requires between {nmin} and {nmax} arguments'",
".",
"format",
"(",
"f",
"=",
"self",
".",
"dest",
",",
"nmin",
"=",
"nmin",
",",
"nmax",
"=",
"nmax",
")",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"msg",
")",
"setattr",
"(",
"args",
",",
"self",
".",
"dest",
",",
"values",
")",
"return",
"RequiredLength"
]
| 43 | 15 |
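A short usage sketch for the required_length factory, assuming it is importable as the module-level function shown above; the --point argument name is made up for illustration.

import argparse

parser = argparse.ArgumentParser()
# Accept between 2 and 3 coordinates for --point.
parser.add_argument('--point', nargs='+', action=required_length(2, 3), type=float)

print(parser.parse_args(['--point', '1', '2']).point)       # [1.0, 2.0]
print(parser.parse_args(['--point', '1', '2', '3']).point)  # [1.0, 2.0, 3.0]
# parser.parse_args(['--point', '1']) would raise argparse.ArgumentTypeError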
def _update_index(self, url=None):
"""A helper function that ensures that self._index is
up-to-date. If the index is older than self.INDEX_TIMEOUT,
then download it again."""
        # Check if the index is already up-to-date. If so, do nothing.
if not (self._index is None or url is not None or
time.time()-self._index_timestamp > self.INDEX_TIMEOUT):
return
# If a URL was specified, then update our URL.
self._url = url or self._url
source = self._source
assert source == 'google' or source == 'mirror'
# Download the index file.
if source == 'google':
host = "www.googleapis.com"
conn = HTTPSConnection(host)
conn.request("GET", "/storage/v1/b/{}/o".format(self._url))
r1 = conn.getresponse()
data = r1.read()
elif source == 'mirror':
index_url = path.join(self._url, 'index.json')
data = urlopen(index_url).read()
if six.PY3:
data = data.decode('utf-8')
data = loads(data)
objs = data["items"]
self._index_timestamp = time.time()
# Build a dictionary of packages.
packages = []
for p in objs:
P = Package.fromcsobj(p)
packages.append(P)
self._packages = dict((p.id, p) for p in packages)
# Build language collections.
langs = defaultdict(lambda: [])
for k in self._packages:
package = self._packages[k]
langs[package.language].append(package)
tasks = defaultdict(lambda: [])
for k in self._packages:
package = self._packages[k]
tasks[package.task].append(package)
collections = []
for lang in langs:
children = langs[lang]
name1 = Locale(lang).getDisplayLanguage()
try:
name2 = isoLangs[lang]['name']
except:
name2 = None
if name1 and name1 != lang:
name = name1
elif name2:
name = name2
else:
name = lang
id = "{}{}".format(Downloader.LANG_PREFIX, lang)
name = "{:<20} packages and models".format(name)
c = Collection(id=id, name=name, children=children)
collections.append(c)
for task in tasks:
children = tasks[task]
id = "{}{}".format(Downloader.TASK_PREFIX, task)
c = Collection(id=id, name=task, children=children)
collections.append(c)
self._collections = dict((c.id, c) for c in collections)
# Replace identifiers with actual children in collection.children.
for collection in self._collections.values():
for i, child_id in enumerate(collection.children):
if child_id in self._packages:
collection.children[i] = self._packages[child_id]
if child_id in self._collections:
collection.children[i] = self._collections[child_id]
# Fill in collection.packages for each collection.
for collection in self._collections.values():
packages = {}
queue = [collection]
for child in queue:
if isinstance(child, Collection):
queue.extend(child.children)
else:
packages[child.id] = child
collection.packages = packages.values()
# Flush the status cache
self._status_cache.clear() | [
"def",
"_update_index",
"(",
"self",
",",
"url",
"=",
"None",
")",
":",
"# Check if the index is aleady up-to-date. If so, do nothing.",
"if",
"not",
"(",
"self",
".",
"_index",
"is",
"None",
"or",
"url",
"is",
"not",
"None",
"or",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"_index_timestamp",
">",
"self",
".",
"INDEX_TIMEOUT",
")",
":",
"return",
"# If a URL was specified, then update our URL.",
"self",
".",
"_url",
"=",
"url",
"or",
"self",
".",
"_url",
"source",
"=",
"self",
".",
"_source",
"assert",
"source",
"==",
"'google'",
"or",
"source",
"==",
"'mirror'",
"# Download the index file.",
"if",
"source",
"==",
"'google'",
":",
"host",
"=",
"\"www.googleapis.com\"",
"conn",
"=",
"HTTPSConnection",
"(",
"host",
")",
"conn",
".",
"request",
"(",
"\"GET\"",
",",
"\"/storage/v1/b/{}/o\"",
".",
"format",
"(",
"self",
".",
"_url",
")",
")",
"r1",
"=",
"conn",
".",
"getresponse",
"(",
")",
"data",
"=",
"r1",
".",
"read",
"(",
")",
"elif",
"source",
"==",
"'mirror'",
":",
"index_url",
"=",
"path",
".",
"join",
"(",
"self",
".",
"_url",
",",
"'index.json'",
")",
"data",
"=",
"urlopen",
"(",
"index_url",
")",
".",
"read",
"(",
")",
"if",
"six",
".",
"PY3",
":",
"data",
"=",
"data",
".",
"decode",
"(",
"'utf-8'",
")",
"data",
"=",
"loads",
"(",
"data",
")",
"objs",
"=",
"data",
"[",
"\"items\"",
"]",
"self",
".",
"_index_timestamp",
"=",
"time",
".",
"time",
"(",
")",
"# Build a dictionary of packages.",
"packages",
"=",
"[",
"]",
"for",
"p",
"in",
"objs",
":",
"P",
"=",
"Package",
".",
"fromcsobj",
"(",
"p",
")",
"packages",
".",
"append",
"(",
"P",
")",
"self",
".",
"_packages",
"=",
"dict",
"(",
"(",
"p",
".",
"id",
",",
"p",
")",
"for",
"p",
"in",
"packages",
")",
"# Build language collections.",
"langs",
"=",
"defaultdict",
"(",
"lambda",
":",
"[",
"]",
")",
"for",
"k",
"in",
"self",
".",
"_packages",
":",
"package",
"=",
"self",
".",
"_packages",
"[",
"k",
"]",
"langs",
"[",
"package",
".",
"language",
"]",
".",
"append",
"(",
"package",
")",
"tasks",
"=",
"defaultdict",
"(",
"lambda",
":",
"[",
"]",
")",
"for",
"k",
"in",
"self",
".",
"_packages",
":",
"package",
"=",
"self",
".",
"_packages",
"[",
"k",
"]",
"tasks",
"[",
"package",
".",
"task",
"]",
".",
"append",
"(",
"package",
")",
"collections",
"=",
"[",
"]",
"for",
"lang",
"in",
"langs",
":",
"children",
"=",
"langs",
"[",
"lang",
"]",
"name1",
"=",
"Locale",
"(",
"lang",
")",
".",
"getDisplayLanguage",
"(",
")",
"try",
":",
"name2",
"=",
"isoLangs",
"[",
"lang",
"]",
"[",
"'name'",
"]",
"except",
":",
"name2",
"=",
"None",
"if",
"name1",
"and",
"name1",
"!=",
"lang",
":",
"name",
"=",
"name1",
"elif",
"name2",
":",
"name",
"=",
"name2",
"else",
":",
"name",
"=",
"lang",
"id",
"=",
"\"{}{}\"",
".",
"format",
"(",
"Downloader",
".",
"LANG_PREFIX",
",",
"lang",
")",
"name",
"=",
"\"{:<20} packages and models\"",
".",
"format",
"(",
"name",
")",
"c",
"=",
"Collection",
"(",
"id",
"=",
"id",
",",
"name",
"=",
"name",
",",
"children",
"=",
"children",
")",
"collections",
".",
"append",
"(",
"c",
")",
"for",
"task",
"in",
"tasks",
":",
"children",
"=",
"tasks",
"[",
"task",
"]",
"id",
"=",
"\"{}{}\"",
".",
"format",
"(",
"Downloader",
".",
"TASK_PREFIX",
",",
"task",
")",
"c",
"=",
"Collection",
"(",
"id",
"=",
"id",
",",
"name",
"=",
"task",
",",
"children",
"=",
"children",
")",
"collections",
".",
"append",
"(",
"c",
")",
"self",
".",
"_collections",
"=",
"dict",
"(",
"(",
"c",
".",
"id",
",",
"c",
")",
"for",
"c",
"in",
"collections",
")",
"# Replace identifiers with actual children in collection.children.",
"for",
"collection",
"in",
"self",
".",
"_collections",
".",
"values",
"(",
")",
":",
"for",
"i",
",",
"child_id",
"in",
"enumerate",
"(",
"collection",
".",
"children",
")",
":",
"if",
"child_id",
"in",
"self",
".",
"_packages",
":",
"collection",
".",
"children",
"[",
"i",
"]",
"=",
"self",
".",
"_packages",
"[",
"child_id",
"]",
"if",
"child_id",
"in",
"self",
".",
"_collections",
":",
"collection",
".",
"children",
"[",
"i",
"]",
"=",
"self",
".",
"_collections",
"[",
"child_id",
"]",
"# Fill in collection.packages for each collection.",
"for",
"collection",
"in",
"self",
".",
"_collections",
".",
"values",
"(",
")",
":",
"packages",
"=",
"{",
"}",
"queue",
"=",
"[",
"collection",
"]",
"for",
"child",
"in",
"queue",
":",
"if",
"isinstance",
"(",
"child",
",",
"Collection",
")",
":",
"queue",
".",
"extend",
"(",
"child",
".",
"children",
")",
"else",
":",
"packages",
"[",
"child",
".",
"id",
"]",
"=",
"child",
"collection",
".",
"packages",
"=",
"packages",
".",
"values",
"(",
")",
"# Flush the status cache",
"self",
".",
"_status_cache",
".",
"clear",
"(",
")"
]
| 29.228571 | 18.4 |
def save_persistent_attributes(self):
# type: () -> None
"""Save persistent attributes to the persistence layer if a
persistence adapter is provided.
:rtype: None
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to save persistence attributes without persistence adapter
"""
if not self._persistence_adapter:
raise AttributesManagerException(
"Cannot save PersistentAttributes without "
"persistence adapter!")
if self._persistent_attributes_set:
self._persistence_adapter.save_attributes(
request_envelope=self._request_envelope,
attributes=self._persistence_attributes) | [
"def",
"save_persistent_attributes",
"(",
"self",
")",
":",
"# type: () -> None",
"if",
"not",
"self",
".",
"_persistence_adapter",
":",
"raise",
"AttributesManagerException",
"(",
"\"Cannot save PersistentAttributes without \"",
"\"persistence adapter!\"",
")",
"if",
"self",
".",
"_persistent_attributes_set",
":",
"self",
".",
"_persistence_adapter",
".",
"save_attributes",
"(",
"request_envelope",
"=",
"self",
".",
"_request_envelope",
",",
"attributes",
"=",
"self",
".",
"_persistence_attributes",
")"
]
| 44.294118 | 13.588235 |
def nsx_controller_connection_addr_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
nsx_controller = ET.SubElement(config, "nsx-controller", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(nsx_controller, "name")
name_key.text = kwargs.pop('name')
connection_addr = ET.SubElement(nsx_controller, "connection-addr")
address = ET.SubElement(connection_addr, "address")
address.text = kwargs.pop('address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"nsx_controller_connection_addr_address",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"nsx_controller",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"nsx-controller\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-tunnels\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"nsx_controller",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"connection_addr",
"=",
"ET",
".",
"SubElement",
"(",
"nsx_controller",
",",
"\"connection-addr\"",
")",
"address",
"=",
"ET",
".",
"SubElement",
"(",
"connection_addr",
",",
"\"address\"",
")",
"address",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'address'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
]
| 46.923077 | 17.923077 |
def regexp_replace(str, pattern, replacement):
r"""Replace all substrings of the specified string value that match regexp with rep.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
[Row(d=u'-----')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
return Column(jc) | [
"def",
"regexp_replace",
"(",
"str",
",",
"pattern",
",",
"replacement",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"jc",
"=",
"sc",
".",
"_jvm",
".",
"functions",
".",
"regexp_replace",
"(",
"_to_java_column",
"(",
"str",
")",
",",
"pattern",
",",
"replacement",
")",
"return",
"Column",
"(",
"jc",
")"
]
| 44.7 | 18.8 |
def clear(self, username, project):
"""Clear the cache for given project."""
method = 'DELETE'
url = ('/project/{username}/{project}/build-cache?'
'circle-token={token}'.format(username=username,
project=project,
token=self.client.api_token))
json_data = self.client.request(method, url)
return json_data | [
"def",
"clear",
"(",
"self",
",",
"username",
",",
"project",
")",
":",
"method",
"=",
"'DELETE'",
"url",
"=",
"(",
"'/project/{username}/{project}/build-cache?'",
"'circle-token={token}'",
".",
"format",
"(",
"username",
"=",
"username",
",",
"project",
"=",
"project",
",",
"token",
"=",
"self",
".",
"client",
".",
"api_token",
")",
")",
"json_data",
"=",
"self",
".",
"client",
".",
"request",
"(",
"method",
",",
"url",
")",
"return",
"json_data"
]
| 49 | 16.111111 |
def _get_context_and_user_ids(self, context_name, user_name):
"""Helper to get the context ID and user ID from the given names."""
if context_name is None:
return None, None
context_id = self.get_context_info(context_name)['id']
user_id = None
if user_name:
user_id = self._get_user_id_from_name(context_id, user_name)
return context_id, user_id | [
"def",
"_get_context_and_user_ids",
"(",
"self",
",",
"context_name",
",",
"user_name",
")",
":",
"if",
"context_name",
"is",
"None",
":",
"return",
"None",
",",
"None",
"context_id",
"=",
"self",
".",
"get_context_info",
"(",
"context_name",
")",
"[",
"'id'",
"]",
"user_id",
"=",
"None",
"if",
"user_name",
":",
"user_id",
"=",
"self",
".",
"_get_user_id_from_name",
"(",
"context_id",
",",
"user_name",
")",
"return",
"context_id",
",",
"user_id"
]
| 37.181818 | 19.727273 |
def on(self, events, callback, times=None):
"""
Registers the given *callback* with the given *events* (string or list
of strings) that will get called whenever the given *event* is triggered
(using :meth:`self.trigger`).
If *times* is given the *callback* will only be fired that many times
before it is automatically removed from :attr:`self._on_off_events`.
"""
        # Make sure our _on_off_events dict is present (if first invocation)
if not hasattr(self, '_on_off_events'):
self._on_off_events = {}
if isinstance(events, (str, unicode)):
events = [events]
callback_obj = {
'callback': callback,
'times': times,
'calls': 0
}
for event in events:
if event not in self._on_off_events:
self._on_off_events.update({event: [callback_obj.copy()]})
else:
self._on_off_events[event].append(callback_obj.copy()) | [
"def",
"on",
"(",
"self",
",",
"events",
",",
"callback",
",",
"times",
"=",
"None",
")",
":",
"# Make sure our _on_off_events dict is present (if first invokation)",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_on_off_events'",
")",
":",
"self",
".",
"_on_off_events",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"events",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"events",
"=",
"[",
"events",
"]",
"callback_obj",
"=",
"{",
"'callback'",
":",
"callback",
",",
"'times'",
":",
"times",
",",
"'calls'",
":",
"0",
"}",
"for",
"event",
"in",
"events",
":",
"if",
"event",
"not",
"in",
"self",
".",
"_on_off_events",
":",
"self",
".",
"_on_off_events",
".",
"update",
"(",
"{",
"event",
":",
"[",
"callback_obj",
".",
"copy",
"(",
")",
"]",
"}",
")",
"else",
":",
"self",
".",
"_on_off_events",
"[",
"event",
"]",
".",
"append",
"(",
"callback_obj",
".",
"copy",
"(",
")",
")"
]
| 41.625 | 18.875 |
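A small sketch of how callbacks registered with on() might be consumed. Only on() is shown in this row, so the Events class and the trigger() semantics below (fire callbacks, drop any that reach their call limit) are an illustrative guess, not the library's actual implementation.

class Events:
    def on(self, event, callback, times=None):
        # Same bookkeeping as above, reduced to a single event name.
        if not hasattr(self, '_on_off_events'):
            self._on_off_events = {}
        entry = {'callback': callback, 'times': times, 'calls': 0}
        self._on_off_events.setdefault(event, []).append(entry)

    def trigger(self, event):
        # Hypothetical trigger: fire callbacks and drop any that hit their limit.
        kept = []
        for entry in self._on_off_events.get(event, []):
            entry['callback']()
            entry['calls'] += 1
            if entry['times'] is None or entry['calls'] < entry['times']:
                kept.append(entry)
        self._on_off_events[event] = kept

e = Events()
e.on('ping', lambda: print('pong'), times=2)
e.trigger('ping')  # pong
e.trigger('ping')  # pong
e.trigger('ping')  # nothing: callback was removed after 2 calls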
def prt_outfiles_flat(self, goea_results, outfiles):
"""Write to outfiles."""
kws = {'indent':self.args.indent, 'itemid2name':self.itemid2name}
for outfile in outfiles:
if outfile.endswith(".xlsx"):
self.objgoea.wr_xlsx(outfile, goea_results, **kws)
#elif outfile.endswith(".txt"): # TBD
# pass
else:
self.objgoea.wr_tsv(outfile, goea_results, **kws) | [
"def",
"prt_outfiles_flat",
"(",
"self",
",",
"goea_results",
",",
"outfiles",
")",
":",
"kws",
"=",
"{",
"'indent'",
":",
"self",
".",
"args",
".",
"indent",
",",
"'itemid2name'",
":",
"self",
".",
"itemid2name",
"}",
"for",
"outfile",
"in",
"outfiles",
":",
"if",
"outfile",
".",
"endswith",
"(",
"\".xlsx\"",
")",
":",
"self",
".",
"objgoea",
".",
"wr_xlsx",
"(",
"outfile",
",",
"goea_results",
",",
"*",
"*",
"kws",
")",
"#elif outfile.endswith(\".txt\"): # TBD",
"# pass",
"else",
":",
"self",
".",
"objgoea",
".",
"wr_tsv",
"(",
"outfile",
",",
"goea_results",
",",
"*",
"*",
"kws",
")"
]
| 44.9 | 15.7 |
def _function(self, x, a, b, c, d, s):
"""Lorentzian asymmetric function
x: frequency coordinate
a: peak position
b: half width
c: area proportional parameter
d: base line
s: asymmetry parameter
"""
return c/(np.pi*self._g_a(x, a, b, s)*(1.0+((x-a)/(self._g_a(x, a, b, s)))**2))+d | [
"def",
"_function",
"(",
"self",
",",
"x",
",",
"a",
",",
"b",
",",
"c",
",",
"d",
",",
"s",
")",
":",
"return",
"c",
"/",
"(",
"np",
".",
"pi",
"*",
"self",
".",
"_g_a",
"(",
"x",
",",
"a",
",",
"b",
",",
"s",
")",
"*",
"(",
"1.0",
"+",
"(",
"(",
"x",
"-",
"a",
")",
"/",
"(",
"self",
".",
"_g_a",
"(",
"x",
",",
"a",
",",
"b",
",",
"s",
")",
")",
")",
"**",
"2",
")",
")",
"+",
"d"
]
| 34.3 | 12.7 |
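For reference, the expression returned by _function above is an asymmetric Lorentzian; writing \gamma(x) for the asymmetry-dependent half-width self._g_a(x, a, b, s) (whose definition is not shown in this row), the return value is

f(x) = \frac{c}{\pi\,\gamma(x)\left[1 + \left(\frac{x - a}{\gamma(x)}\right)^{2}\right]} + d,
\qquad \gamma(x) \equiv \_g\_a(x, a, b, s).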
def chmod_plus(path, add_bits=stat.S_IWUSR):
"""Change a file's mode by adding a few bits.
Like chmod +<bits> <path> in a Unix shell.
"""
try:
os.chmod(path, stat.S_IMODE(os.stat(path).st_mode) | add_bits)
except OSError: # pragma: nocover
pass | [
"def",
"chmod_plus",
"(",
"path",
",",
"add_bits",
"=",
"stat",
".",
"S_IWUSR",
")",
":",
"try",
":",
"os",
".",
"chmod",
"(",
"path",
",",
"stat",
".",
"S_IMODE",
"(",
"os",
".",
"stat",
"(",
"path",
")",
".",
"st_mode",
")",
"|",
"add_bits",
")",
"except",
"OSError",
":",
"# pragma: nocover",
"pass"
]
| 30.444444 | 15.777778 |
def save(self, commit=True):
'''
If a staff member is reporting substitute teaching for a second time, then we should update
the list of occurrences for which they are a substitute on their existing EventStaffMember
record, rather than creating a new record and creating database issues.
'''
existing_record = EventStaffMember.objects.filter(
staffMember=self.cleaned_data.get('staffMember'),
event=self.cleaned_data.get('event'),
category=getConstant('general__eventStaffCategorySubstitute'),
replacedStaffMember=self.cleaned_data.get('replacedStaffMember'),
)
if existing_record.exists():
record = existing_record.first()
for x in self.cleaned_data.get('occurrences'):
record.occurrences.add(x)
record.save()
return record
else:
return super(SubstituteReportingForm,self).save() | [
"def",
"save",
"(",
"self",
",",
"commit",
"=",
"True",
")",
":",
"existing_record",
"=",
"EventStaffMember",
".",
"objects",
".",
"filter",
"(",
"staffMember",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'staffMember'",
")",
",",
"event",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'event'",
")",
",",
"category",
"=",
"getConstant",
"(",
"'general__eventStaffCategorySubstitute'",
")",
",",
"replacedStaffMember",
"=",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'replacedStaffMember'",
")",
",",
")",
"if",
"existing_record",
".",
"exists",
"(",
")",
":",
"record",
"=",
"existing_record",
".",
"first",
"(",
")",
"for",
"x",
"in",
"self",
".",
"cleaned_data",
".",
"get",
"(",
"'occurrences'",
")",
":",
"record",
".",
"occurrences",
".",
"add",
"(",
"x",
")",
"record",
".",
"save",
"(",
")",
"return",
"record",
"else",
":",
"return",
"super",
"(",
"SubstituteReportingForm",
",",
"self",
")",
".",
"save",
"(",
")"
]
| 47.85 | 24.05 |
def plot_seebeck_eff_mass_mu(self, temps=[300], output='average',
Lambda=0.5):
"""
Plot respect to the chemical potential of the Seebeck effective mass
calculated as explained in Ref.
Gibbs, Z. M. et al., Effective mass and fermi surface complexity factor
from ab initio band structure calculations.
npj Computational Materials 3, 8 (2017).
Args:
output: 'average' returns the seebeck effective mass calculated
using the average of the three diagonal components of the
seebeck tensor. 'tensor' returns the seebeck effective mass
respect to the three diagonal components of the seebeck tensor.
temps: list of temperatures of calculated seebeck.
Lambda: fitting parameter used to model the scattering (0.5 means
constant relaxation time).
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
for T in temps:
sbk_mass = self._bz.get_seebeck_eff_mass(output=output, temp=T,
                                                      Lambda=Lambda)
# remove noise inside the gap
start = self._bz.mu_doping['p'][T][0]
stop = self._bz.mu_doping['n'][T][0]
mu_steps_1 = []
mu_steps_2 = []
sbk_mass_1 = []
sbk_mass_2 = []
for i, mu in enumerate(self._bz.mu_steps):
if mu <= start:
mu_steps_1.append(mu)
sbk_mass_1.append(sbk_mass[i])
elif mu >= stop:
mu_steps_2.append(mu)
sbk_mass_2.append(sbk_mass[i])
plt.plot(mu_steps_1, sbk_mass_1, label=str(T) + 'K', linewidth=3.0)
plt.plot(mu_steps_2, sbk_mass_2, linewidth=3.0)
if output == 'average':
plt.gca().get_lines()[1].set_c(plt.gca().get_lines()[0].get_c())
elif output == 'tensor':
plt.gca().get_lines()[3].set_c(plt.gca().get_lines()[0].get_c())
plt.gca().get_lines()[4].set_c(plt.gca().get_lines()[1].get_c())
plt.gca().get_lines()[5].set_c(plt.gca().get_lines()[2].get_c())
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.ylabel("Seebeck effective mass", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
if output == 'tensor':
plt.legend([str(i) + '_' + str(T) + 'K' for T in temps for i in
('x', 'y', 'z')], fontsize=20)
elif output == 'average':
plt.legend(fontsize=20)
plt.tight_layout()
return plt | [
"def",
"plot_seebeck_eff_mass_mu",
"(",
"self",
",",
"temps",
"=",
"[",
"300",
"]",
",",
"output",
"=",
"'average'",
",",
"Lambda",
"=",
"0.5",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"9",
",",
"7",
")",
")",
"for",
"T",
"in",
"temps",
":",
"sbk_mass",
"=",
"self",
".",
"_bz",
".",
"get_seebeck_eff_mass",
"(",
"output",
"=",
"output",
",",
"temp",
"=",
"T",
",",
"Lambda",
"=",
"0.5",
")",
"# remove noise inside the gap",
"start",
"=",
"self",
".",
"_bz",
".",
"mu_doping",
"[",
"'p'",
"]",
"[",
"T",
"]",
"[",
"0",
"]",
"stop",
"=",
"self",
".",
"_bz",
".",
"mu_doping",
"[",
"'n'",
"]",
"[",
"T",
"]",
"[",
"0",
"]",
"mu_steps_1",
"=",
"[",
"]",
"mu_steps_2",
"=",
"[",
"]",
"sbk_mass_1",
"=",
"[",
"]",
"sbk_mass_2",
"=",
"[",
"]",
"for",
"i",
",",
"mu",
"in",
"enumerate",
"(",
"self",
".",
"_bz",
".",
"mu_steps",
")",
":",
"if",
"mu",
"<=",
"start",
":",
"mu_steps_1",
".",
"append",
"(",
"mu",
")",
"sbk_mass_1",
".",
"append",
"(",
"sbk_mass",
"[",
"i",
"]",
")",
"elif",
"mu",
">=",
"stop",
":",
"mu_steps_2",
".",
"append",
"(",
"mu",
")",
"sbk_mass_2",
".",
"append",
"(",
"sbk_mass",
"[",
"i",
"]",
")",
"plt",
".",
"plot",
"(",
"mu_steps_1",
",",
"sbk_mass_1",
",",
"label",
"=",
"str",
"(",
"T",
")",
"+",
"'K'",
",",
"linewidth",
"=",
"3.0",
")",
"plt",
".",
"plot",
"(",
"mu_steps_2",
",",
"sbk_mass_2",
",",
"linewidth",
"=",
"3.0",
")",
"if",
"output",
"==",
"'average'",
":",
"plt",
".",
"gca",
"(",
")",
".",
"get_lines",
"(",
")",
"[",
"1",
"]",
".",
"set_c",
"(",
"plt",
".",
"gca",
"(",
")",
".",
"get_lines",
"(",
")",
"[",
"0",
"]",
".",
"get_c",
"(",
")",
")",
"elif",
"output",
"==",
"'tensor'",
":",
"plt",
".",
"gca",
"(",
")",
".",
"get_lines",
"(",
")",
"[",
"3",
"]",
".",
"set_c",
"(",
"plt",
".",
"gca",
"(",
")",
".",
"get_lines",
"(",
")",
"[",
"0",
"]",
".",
"get_c",
"(",
")",
")",
"plt",
".",
"gca",
"(",
")",
".",
"get_lines",
"(",
")",
"[",
"4",
"]",
".",
"set_c",
"(",
"plt",
".",
"gca",
"(",
")",
".",
"get_lines",
"(",
")",
"[",
"1",
"]",
".",
"get_c",
"(",
")",
")",
"plt",
".",
"gca",
"(",
")",
".",
"get_lines",
"(",
")",
"[",
"5",
"]",
".",
"set_c",
"(",
"plt",
".",
"gca",
"(",
")",
".",
"get_lines",
"(",
")",
"[",
"2",
"]",
".",
"get_c",
"(",
")",
")",
"plt",
".",
"xlabel",
"(",
"\"E-E$_f$ (eV)\"",
",",
"fontsize",
"=",
"30",
")",
"plt",
".",
"ylabel",
"(",
"\"Seebeck effective mass\"",
",",
"fontsize",
"=",
"30",
")",
"plt",
".",
"xticks",
"(",
"fontsize",
"=",
"25",
")",
"plt",
".",
"yticks",
"(",
"fontsize",
"=",
"25",
")",
"if",
"output",
"==",
"'tensor'",
":",
"plt",
".",
"legend",
"(",
"[",
"str",
"(",
"i",
")",
"+",
"'_'",
"+",
"str",
"(",
"T",
")",
"+",
"'K'",
"for",
"T",
"in",
"temps",
"for",
"i",
"in",
"(",
"'x'",
",",
"'y'",
",",
"'z'",
")",
"]",
",",
"fontsize",
"=",
"20",
")",
"elif",
"output",
"==",
"'average'",
":",
"plt",
".",
"legend",
"(",
"fontsize",
"=",
"20",
")",
"plt",
".",
"tight_layout",
"(",
")",
"return",
"plt"
]
| 44.540984 | 18.57377 |
def upgrade(self, dependencies=False, prerelease=False, force=False):
"""
        Upgrade the package unconditionally
Args:
dependencies: update package dependencies if True (see pip --no-deps)
prerelease: update to pre-release and development versions
force: reinstall all packages even if they are already up-to-date
        Raises PIPError if the pip invocation exits with a non-zero status
"""
pip_args = ['install', self.pkg]
found = self._get_current() != (-1)
if found:
pip_args.append("--upgrade")
if force:
pip_args.append(
"--force-reinstall" if found else "--ignore-installed")
if not dependencies:
pip_args.append("--no-deps")
if prerelease:
pip_args.append("--pre")
proxy = os.environ.get('http_proxy')
if proxy:
pip_args.extend(['--proxy', proxy])
if self.__index:
pip_args.extend(['-i', self.index])
try:
ecode = pip.main(args=pip_args)
except TypeError:
# pip changed in 0.6.0 from initial_args to args, this is for backwards compatibility
# can be removed when pip 0.5 is no longer in use at all (approx.
# year 2025)
ecode = pip.main(initial_args=pip_args)
if ecode != 0:
raise PIPError(ecode) | [
"def",
"upgrade",
"(",
"self",
",",
"dependencies",
"=",
"False",
",",
"prerelease",
"=",
"False",
",",
"force",
"=",
"False",
")",
":",
"pip_args",
"=",
"[",
"'install'",
",",
"self",
".",
"pkg",
"]",
"found",
"=",
"self",
".",
"_get_current",
"(",
")",
"!=",
"(",
"-",
"1",
")",
"if",
"found",
":",
"pip_args",
".",
"append",
"(",
"\"--upgrade\"",
")",
"if",
"force",
":",
"pip_args",
".",
"append",
"(",
"\"--force-reinstall\"",
"if",
"found",
"else",
"\"--ignore-installed\"",
")",
"if",
"not",
"dependencies",
":",
"pip_args",
".",
"append",
"(",
"\"--no-deps\"",
")",
"if",
"prerelease",
":",
"pip_args",
".",
"append",
"(",
"\"--pre\"",
")",
"proxy",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'http_proxy'",
")",
"if",
"proxy",
":",
"pip_args",
".",
"extend",
"(",
"[",
"'--proxy'",
",",
"proxy",
"]",
")",
"if",
"self",
".",
"__index",
":",
"pip_args",
".",
"extend",
"(",
"[",
"'-i'",
",",
"self",
".",
"index",
"]",
")",
"try",
":",
"ecode",
"=",
"pip",
".",
"main",
"(",
"args",
"=",
"pip_args",
")",
"except",
"TypeError",
":",
"# pip changed in 0.6.0 from initial_args to args, this is for backwards compatibility",
"# can be removed when pip 0.5 is no longer in use at all (approx.",
"# year 2025)",
"ecode",
"=",
"pip",
".",
"main",
"(",
"initial_args",
"=",
"pip_args",
")",
"if",
"ecode",
"!=",
"0",
":",
"raise",
"PIPError",
"(",
"ecode",
")"
]
| 32.380952 | 20.52381 |
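For reference, the argument list assembled by upgrade() maps directly onto the pip command line. A hypothetical trace for a package "pkg" that is already installed, with prerelease=True, dependencies=False, force=False, no proxy, and no custom index:

# Argument list built by upgrade() under the assumptions above.
pip_args = ['install', 'pkg', '--upgrade', '--no-deps', '--pre']
# Equivalent CLI: pip install pkg --upgrade --no-deps --pre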
def add_result(self, scan_id, result_type, host='', name='', value='',
port='', test_id='', severity='', qod=''):
""" Add a result to a scan in the table. """
assert scan_id
assert len(name) or len(value)
result = dict()
result['type'] = result_type
result['name'] = name
result['severity'] = severity
result['test_id'] = test_id
result['value'] = value
result['host'] = host
result['port'] = port
result['qod'] = qod
results = self.scans_table[scan_id]['results']
results.append(result)
# Set scan_info's results to propagate results to parent process.
self.scans_table[scan_id]['results'] = results | [
"def",
"add_result",
"(",
"self",
",",
"scan_id",
",",
"result_type",
",",
"host",
"=",
"''",
",",
"name",
"=",
"''",
",",
"value",
"=",
"''",
",",
"port",
"=",
"''",
",",
"test_id",
"=",
"''",
",",
"severity",
"=",
"''",
",",
"qod",
"=",
"''",
")",
":",
"assert",
"scan_id",
"assert",
"len",
"(",
"name",
")",
"or",
"len",
"(",
"value",
")",
"result",
"=",
"dict",
"(",
")",
"result",
"[",
"'type'",
"]",
"=",
"result_type",
"result",
"[",
"'name'",
"]",
"=",
"name",
"result",
"[",
"'severity'",
"]",
"=",
"severity",
"result",
"[",
"'test_id'",
"]",
"=",
"test_id",
"result",
"[",
"'value'",
"]",
"=",
"value",
"result",
"[",
"'host'",
"]",
"=",
"host",
"result",
"[",
"'port'",
"]",
"=",
"port",
"result",
"[",
"'qod'",
"]",
"=",
"qod",
"results",
"=",
"self",
".",
"scans_table",
"[",
"scan_id",
"]",
"[",
"'results'",
"]",
"results",
".",
"append",
"(",
"result",
")",
"# Set scan_info's results to propagate results to parent process.",
"self",
".",
"scans_table",
"[",
"scan_id",
"]",
"[",
"'results'",
"]",
"=",
"results"
]
| 38.421053 | 14 |
def tile_images(data, padsize=1, padval=0):
"""
Convert an array with shape of (B, C, H, W) into a tiled image.
Args:
data (~numpy.ndarray): An array with shape of (B, C, H, W).
padsize (int): Each tile has padding with this size.
padval (float): Padding pixels are filled with this value.
Returns:
tile_image (~numpy.ndarray): A tile image.
"""
assert(data.ndim == 4)
data = data.transpose(0, 2, 3, 1)
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = (
(0, n ** 2 - data.shape[0]),
(0, padsize),
(0, padsize)
) + ((0, 0),) * (data.ndim - 3)
data = np.pad(
data, padding, mode='constant', constant_values=(padval, padval))
data = data.reshape(
(n, n)
+ data.shape[1:]
).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape(
(n * data.shape[1], n * data.shape[3]) + data.shape[4:])
if data.shape[2] == 1:
# Return as (H, W)
return data.reshape(data.shape[:2])
return data | [
"def",
"tile_images",
"(",
"data",
",",
"padsize",
"=",
"1",
",",
"padval",
"=",
"0",
")",
":",
"assert",
"(",
"data",
".",
"ndim",
"==",
"4",
")",
"data",
"=",
"data",
".",
"transpose",
"(",
"0",
",",
"2",
",",
"3",
",",
"1",
")",
"# force the number of filters to be square",
"n",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"np",
".",
"sqrt",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")",
")",
")",
"padding",
"=",
"(",
"(",
"0",
",",
"n",
"**",
"2",
"-",
"data",
".",
"shape",
"[",
"0",
"]",
")",
",",
"(",
"0",
",",
"padsize",
")",
",",
"(",
"0",
",",
"padsize",
")",
")",
"+",
"(",
"(",
"0",
",",
"0",
")",
",",
")",
"*",
"(",
"data",
".",
"ndim",
"-",
"3",
")",
"data",
"=",
"np",
".",
"pad",
"(",
"data",
",",
"padding",
",",
"mode",
"=",
"'constant'",
",",
"constant_values",
"=",
"(",
"padval",
",",
"padval",
")",
")",
"data",
"=",
"data",
".",
"reshape",
"(",
"(",
"n",
",",
"n",
")",
"+",
"data",
".",
"shape",
"[",
"1",
":",
"]",
")",
".",
"transpose",
"(",
"(",
"0",
",",
"2",
",",
"1",
",",
"3",
")",
"+",
"tuple",
"(",
"range",
"(",
"4",
",",
"data",
".",
"ndim",
"+",
"1",
")",
")",
")",
"data",
"=",
"data",
".",
"reshape",
"(",
"(",
"n",
"*",
"data",
".",
"shape",
"[",
"1",
"]",
",",
"n",
"*",
"data",
".",
"shape",
"[",
"3",
"]",
")",
"+",
"data",
".",
"shape",
"[",
"4",
":",
"]",
")",
"if",
"data",
".",
"shape",
"[",
"2",
"]",
"==",
"1",
":",
"# Return as (H, W)",
"return",
"data",
".",
"reshape",
"(",
"data",
".",
"shape",
"[",
":",
"2",
"]",
")",
"return",
"data"
]
| 30.885714 | 18.942857 |
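A quick usage sketch for tile_images, assuming it is importable as the module-level function shown above and that numpy is available as np in its module.

import numpy as np

# 10 single-channel 8x8 "feature maps"; B=10, C=1, H=8, W=8.
data = np.random.rand(10, 1, 8, 8)
tile = tile_images(data, padsize=1, padval=0.0)

# The 10 maps are padded up to the next square grid (4x4 tiles), each tile 8+1 px wide.
print(tile.shape)  # (36, 36)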
def protoc_command(lang, output_dir, proto_path, refactored_dir):
"""Runs the "protoc" command on the refactored Protobuf files to generate
the source python/python3 files.
Args:
lang (str): the language to compile with "protoc"
(i.e. python, python3)
output_dir (str): the output directory for the generated source files
proto_path (str): the root protobuf build path in which to run "protoc"
refactored_dir (str): the input directory of the Protobuf files
"""
proto_files = glob.glob(os.path.join(refactored_dir, '*.proto'))
cmd = ['protoc', '-I', proto_path, '--{}_out'.format(lang), output_dir]
cmd.extend(proto_files)
print(' '.join(cmd))
p = subprocess.Popen(
cmd, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin,
cwd=proto_path)
p.communicate() | [
"def",
"protoc_command",
"(",
"lang",
",",
"output_dir",
",",
"proto_path",
",",
"refactored_dir",
")",
":",
"proto_files",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"refactored_dir",
",",
"'*.proto'",
")",
")",
"cmd",
"=",
"[",
"'protoc'",
",",
"'-I'",
",",
"proto_path",
",",
"'--{}_out'",
".",
"format",
"(",
"lang",
")",
",",
"output_dir",
"]",
"cmd",
".",
"extend",
"(",
"proto_files",
")",
"print",
"(",
"' '",
".",
"join",
"(",
"cmd",
")",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"sys",
".",
"stdout",
",",
"stderr",
"=",
"sys",
".",
"stderr",
",",
"stdin",
"=",
"sys",
".",
"stdin",
",",
"cwd",
"=",
"proto_path",
")",
"p",
".",
"communicate",
"(",
")"
]
| 44.631579 | 20.947368 |
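A hypothetical invocation of protoc_command, assuming the protoc binary is on PATH and that the directory names below exist; they are placeholders, not taken from the row above.

import os

proto_path = os.path.abspath('build/protos')             # root passed to protoc -I
refactored_dir = os.path.join(proto_path, 'refactored')  # where the .proto files live
output_dir = os.path.abspath('gen/python')

# Generates *_pb2.py modules for every refactored .proto file.
protoc_command('python', output_dir, proto_path, refactored_dir)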
def _load_resource(self, source_r, abs_path=False):
"""The CSV package has no reseources, so we just need to resolve the URLs to them. Usually, the
CSV package is built from a file system ackage on a publically acessible server. """
r = self.doc.resource(source_r.name)
r.url = self.resource_root.join(r.url).inner | [
"def",
"_load_resource",
"(",
"self",
",",
"source_r",
",",
"abs_path",
"=",
"False",
")",
":",
"r",
"=",
"self",
".",
"doc",
".",
"resource",
"(",
"source_r",
".",
"name",
")",
"r",
".",
"url",
"=",
"self",
".",
"resource_root",
".",
"join",
"(",
"r",
".",
"url",
")",
".",
"inner"
]
| 49.428571 | 15.285714 |
def _submit_resource_request(self):
"""
        **Purpose**: Creates and submits a RADICAL Pilot Job as per the
        user-provided resource description
"""
try:
self._prof.prof('creating rreq', uid=self._uid)
def _pilot_state_cb(pilot, state):
self._logger.info('Pilot %s state: %s' % (pilot.uid, state))
if state == rp.FAILED:
self._logger.error('Pilot has failed')
elif state == rp.DONE:
self._logger.error('Pilot has completed')
self._session = rp.Session(dburl=self._mlab_url, uid=self._sid)
self._pmgr = rp.PilotManager(session=self._session)
self._pmgr.register_callback(_pilot_state_cb)
pd_init = {
'resource': self._resource,
'runtime': self._walltime,
'cores': self._cpus,
'project': self._project,
}
if self._gpus:
pd_init['gpus'] = self._gpus
if self._access_schema:
pd_init['access_schema'] = self._access_schema
if self._queue:
pd_init['queue'] = self._queue
if self._rts_config.get('sandbox_cleanup', None):
pd_init['cleanup'] = True
# Create Compute Pilot with validated resource description
pdesc = rp.ComputePilotDescription(pd_init)
self._prof.prof('rreq created', uid=self._uid)
# Launch the pilot
self._pilot = self._pmgr.submit_pilots(pdesc)
self._prof.prof('rreq submitted', uid=self._uid)
shared_staging_directives = list()
for data in self._shared_data:
temp = {
'source': data,
'target': 'pilot:///' + os.path.basename(data)
}
shared_staging_directives.append(temp)
self._pilot.stage_in(shared_staging_directives)
self._prof.prof('shared data staging initiated', uid=self._uid)
self._logger.info('Resource request submission successful.. waiting for pilot to go Active')
# Wait for pilot to go active
self._pilot.wait([rp.PMGR_ACTIVE, rp.FAILED, rp.CANCELED])
self._prof.prof('resource active', uid=self._uid)
self._logger.info('Pilot is now active')
except KeyboardInterrupt:
if self._session:
self._session.close()
self._logger.exception('Execution interrupted by user (you probably hit Ctrl+C), ' +
'trying to exit callback thread gracefully...')
raise KeyboardInterrupt
except Exception, ex:
self._logger.exception('Resource request submission failed')
raise | [
"def",
"_submit_resource_request",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_prof",
".",
"prof",
"(",
"'creating rreq'",
",",
"uid",
"=",
"self",
".",
"_uid",
")",
"def",
"_pilot_state_cb",
"(",
"pilot",
",",
"state",
")",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"'Pilot %s state: %s'",
"%",
"(",
"pilot",
".",
"uid",
",",
"state",
")",
")",
"if",
"state",
"==",
"rp",
".",
"FAILED",
":",
"self",
".",
"_logger",
".",
"error",
"(",
"'Pilot has failed'",
")",
"elif",
"state",
"==",
"rp",
".",
"DONE",
":",
"self",
".",
"_logger",
".",
"error",
"(",
"'Pilot has completed'",
")",
"self",
".",
"_session",
"=",
"rp",
".",
"Session",
"(",
"dburl",
"=",
"self",
".",
"_mlab_url",
",",
"uid",
"=",
"self",
".",
"_sid",
")",
"self",
".",
"_pmgr",
"=",
"rp",
".",
"PilotManager",
"(",
"session",
"=",
"self",
".",
"_session",
")",
"self",
".",
"_pmgr",
".",
"register_callback",
"(",
"_pilot_state_cb",
")",
"pd_init",
"=",
"{",
"'resource'",
":",
"self",
".",
"_resource",
",",
"'runtime'",
":",
"self",
".",
"_walltime",
",",
"'cores'",
":",
"self",
".",
"_cpus",
",",
"'project'",
":",
"self",
".",
"_project",
",",
"}",
"if",
"self",
".",
"_gpus",
":",
"pd_init",
"[",
"'gpus'",
"]",
"=",
"self",
".",
"_gpus",
"if",
"self",
".",
"_access_schema",
":",
"pd_init",
"[",
"'access_schema'",
"]",
"=",
"self",
".",
"_access_schema",
"if",
"self",
".",
"_queue",
":",
"pd_init",
"[",
"'queue'",
"]",
"=",
"self",
".",
"_queue",
"if",
"self",
".",
"_rts_config",
".",
"get",
"(",
"'sandbox_cleanup'",
",",
"None",
")",
":",
"pd_init",
"[",
"'cleanup'",
"]",
"=",
"True",
"# Create Compute Pilot with validated resource description",
"pdesc",
"=",
"rp",
".",
"ComputePilotDescription",
"(",
"pd_init",
")",
"self",
".",
"_prof",
".",
"prof",
"(",
"'rreq created'",
",",
"uid",
"=",
"self",
".",
"_uid",
")",
"# Launch the pilot",
"self",
".",
"_pilot",
"=",
"self",
".",
"_pmgr",
".",
"submit_pilots",
"(",
"pdesc",
")",
"self",
".",
"_prof",
".",
"prof",
"(",
"'rreq submitted'",
",",
"uid",
"=",
"self",
".",
"_uid",
")",
"shared_staging_directives",
"=",
"list",
"(",
")",
"for",
"data",
"in",
"self",
".",
"_shared_data",
":",
"temp",
"=",
"{",
"'source'",
":",
"data",
",",
"'target'",
":",
"'pilot:///'",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"data",
")",
"}",
"shared_staging_directives",
".",
"append",
"(",
"temp",
")",
"self",
".",
"_pilot",
".",
"stage_in",
"(",
"shared_staging_directives",
")",
"self",
".",
"_prof",
".",
"prof",
"(",
"'shared data staging initiated'",
",",
"uid",
"=",
"self",
".",
"_uid",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"'Resource request submission successful.. waiting for pilot to go Active'",
")",
"# Wait for pilot to go active",
"self",
".",
"_pilot",
".",
"wait",
"(",
"[",
"rp",
".",
"PMGR_ACTIVE",
",",
"rp",
".",
"FAILED",
",",
"rp",
".",
"CANCELED",
"]",
")",
"self",
".",
"_prof",
".",
"prof",
"(",
"'resource active'",
",",
"uid",
"=",
"self",
".",
"_uid",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"'Pilot is now active'",
")",
"except",
"KeyboardInterrupt",
":",
"if",
"self",
".",
"_session",
":",
"self",
".",
"_session",
".",
"close",
"(",
")",
"self",
".",
"_logger",
".",
"exception",
"(",
"'Execution interrupted by user (you probably hit Ctrl+C), '",
"+",
"'trying to exit callback thread gracefully...'",
")",
"raise",
"KeyboardInterrupt",
"except",
"Exception",
",",
"ex",
":",
"self",
".",
"_logger",
".",
"exception",
"(",
"'Resource request submission failed'",
")",
"raise"
]
| 33.819277 | 23.216867 |
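Stripped of the profiling and logging in the entry above, the core radical.pilot flow is: describe the pilot, submit it through a PilotManager, and wait for it to reach an active state. A trimmed sketch using only the calls that appear in the entry follows; the resource label and sizes are placeholders, and it assumes a working radical.pilot installation.

import radical.pilot as rp

session = rp.Session()                      # the entry also passes dburl and uid
pmgr = rp.PilotManager(session=session)

pd_init = {
    'resource': 'local.localhost',          # placeholder resource label
    'runtime': 30,                          # walltime in minutes
    'cores': 4,
}
pdesc = rp.ComputePilotDescription(pd_init)

pilot = pmgr.submit_pilots(pdesc)
# Block until the pilot is active, or has failed / been cancelled.
pilot.wait([rp.PMGR_ACTIVE, rp.FAILED, rp.CANCELED])

session.close()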
def Parse(self):
"""Iterator returning dict for each entry in history."""
for data in self.Query(self.EVENTS_QUERY):
(timestamp, agent_bundle_identifier, agent_name, url, sender,
sender_address, type_number, title, referrer, referrer_alias) = data
yield [
timestamp, "OSX_QUARANTINE", url, referrer, title, agent_name,
agent_bundle_identifier, sender, sender_address, type_number,
referrer_alias
] | [
"def",
"Parse",
"(",
"self",
")",
":",
"for",
"data",
"in",
"self",
".",
"Query",
"(",
"self",
".",
"EVENTS_QUERY",
")",
":",
"(",
"timestamp",
",",
"agent_bundle_identifier",
",",
"agent_name",
",",
"url",
",",
"sender",
",",
"sender_address",
",",
"type_number",
",",
"title",
",",
"referrer",
",",
"referrer_alias",
")",
"=",
"data",
"yield",
"[",
"timestamp",
",",
"\"OSX_QUARANTINE\"",
",",
"url",
",",
"referrer",
",",
"title",
",",
"agent_name",
",",
"agent_bundle_identifier",
",",
"sender",
",",
"sender_address",
",",
"type_number",
",",
"referrer_alias",
"]"
]
| 45.1 | 23.1 |
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1) | [
"def",
"_kp2",
"(",
"A",
",",
"B",
")",
":",
"N",
"=",
"A",
".",
"shape",
"[",
"0",
"]",
"if",
"B",
".",
"shape",
"[",
"0",
"]",
"!=",
"N",
":",
"raise",
"(",
"ValueError",
")",
"newshape1",
"=",
"A",
".",
"shape",
"[",
"1",
"]",
"*",
"B",
".",
"shape",
"[",
"1",
"]",
"return",
"np",
".",
"einsum",
"(",
"'ijk,ilm->ijlkm'",
",",
"A",
",",
"B",
")",
".",
"reshape",
"(",
"N",
",",
"newshape1",
",",
"-",
"1",
")"
]
| 37.1 | 14.7 |
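The `_kp2` entry computes a slice-wise Kronecker product: for each leading index `i`, the output slice equals `np.kron(A[i], B[i])`. A quick check of that equivalence on small random arrays (shapes picked arbitrarily for illustration):

import numpy as np

def kp2(A, B):
    # Slice-wise Kronecker product via einsum, as in the entry above.
    N = A.shape[0]
    if B.shape[0] != N:
        raise ValueError("A and B must share their leading (time) dimension")
    newshape1 = A.shape[1] * B.shape[1]
    return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)

A = np.random.rand(5, 2, 3)   # 5 slices of 2x3 matrices
B = np.random.rand(5, 4, 2)   # 5 slices of 4x2 matrices
out = kp2(A, B)
print(out.shape)              # (5, 8, 6)
# Every slice matches the ordinary Kronecker product of the corresponding slices.
assert all(np.allclose(out[i], np.kron(A[i], B[i])) for i in range(5))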
def transform(self, col):
"""Prepare the transformer to convert data and return the processed table.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
"""
out = pd.DataFrame()
out[self.col_name] = self.safe_datetime_cast(col)
out[self.col_name] = self.to_timestamp(out)
return out | [
"def",
"transform",
"(",
"self",
",",
"col",
")",
":",
"out",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"out",
"[",
"self",
".",
"col_name",
"]",
"=",
"self",
".",
"safe_datetime_cast",
"(",
"col",
")",
"out",
"[",
"self",
".",
"col_name",
"]",
"=",
"self",
".",
"to_timestamp",
"(",
"out",
")",
"return",
"out"
]
| 27.285714 | 19.5 |
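The `transform` entry relies on two helper methods (`safe_datetime_cast`, `to_timestamp`) that are not part of this entry, so their exact behavior is an assumption here. A self-contained sketch of the same two-step idea with pandas, coercing bad values to NaT and converting to nanosecond timestamps:

import numpy as np
import pandas as pd

def datetimes_to_timestamps(col, col_name, date_format='%Y-%m-%d'):
    out = pd.DataFrame()
    # Step 1 (assumed behavior of safe_datetime_cast): parse, coercing bad values to NaT.
    parsed = pd.to_datetime(col[col_name], format=date_format, errors='coerce')
    # Step 2 (assumed behavior of to_timestamp): nanoseconds since the Unix epoch,
    # with missing values kept as NaN.
    stamps = parsed.values.astype(np.int64).astype(float)
    stamps[parsed.isna().values] = np.nan
    out[col_name] = stamps
    return out

# Hypothetical usage with a 'created_at' column:
df = pd.DataFrame({'created_at': ['2020-01-01', 'not a date', '2020-06-15']})
print(datetimes_to_timestamps(df, 'created_at'))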
def create_menu(self, menu_data):
"""
        Create a custom menu ::
# -*- coding: utf-8 -*-
wechat = WechatBasic(appid='appid', appsecret='appsecret')
wechat.create_menu({
'button':[
{
'type': 'click',
'name': '今日歌曲',
'key': 'V1001_TODAY_MUSIC'
},
{
'type': 'click',
'name': '歌手简介',
'key': 'V1001_TODAY_SINGER'
},
{
'name': '菜单',
'sub_button': [
{
'type': 'view',
'name': '搜索',
'url': 'http://www.soso.com/'
},
{
'type': 'view',
'name': '视频',
'url': 'http://v.qq.com/'
},
{
'type': 'click',
'name': '赞一下我们',
'key': 'V1001_GOOD'
}
]
}
]})
        For details, see http://mp.weixin.qq.com/wiki/13/43de8269be54a0a6f64413e4dfa94f39.html
        :param menu_data: Python dict
        :return: the returned JSON data
"""
menu_data = self._transcoding_dict(menu_data)
return self.request.post(
url='https://api.weixin.qq.com/cgi-bin/menu/create',
data=menu_data
) | [
"def",
"create_menu",
"(",
"self",
",",
"menu_data",
")",
":",
"menu_data",
"=",
"self",
".",
"_transcoding_dict",
"(",
"menu_data",
")",
"return",
"self",
".",
"request",
".",
"post",
"(",
"url",
"=",
"'https://api.weixin.qq.com/cgi-bin/menu/create'",
",",
"data",
"=",
"menu_data",
")"
]
| 34.693878 | 13.102041 |
def compute_lower_upper_errors(sample, num_sigma=1):
"""
computes the upper and lower sigma from the median value.
    This function gives good error estimates for skewed pdfs
:param sample: 1-D sample
:return: median, lower_sigma, upper_sigma
"""
if num_sigma > 3:
raise ValueError("Number of sigma-constraints restircted to three. %s not valid" % num_sigma)
num = len(sample)
num_threshold1 = int(round((num-1)*0.833))
num_threshold2 = int(round((num-1)*0.977249868))
num_threshold3 = int(round((num-1)*0.998650102))
median = np.median(sample)
sorted_sample = np.sort(sample)
if num_sigma > 0:
upper_sigma1 = sorted_sample[num_threshold1-1]
lower_sigma1 = sorted_sample[num-num_threshold1-1]
else:
return median, [[]]
if num_sigma > 1:
upper_sigma2 = sorted_sample[num_threshold2-1]
lower_sigma2 = sorted_sample[num-num_threshold2-1]
else:
return median, [[median-lower_sigma1, upper_sigma1-median]]
if num_sigma > 2:
upper_sigma3 = sorted_sample[num_threshold3-1]
lower_sigma3 = sorted_sample[num-num_threshold3-1]
return median, [[median-lower_sigma1, upper_sigma1-median], [median-lower_sigma2, upper_sigma2-median],
[median-lower_sigma3, upper_sigma3-median]]
else:
return median, [[median-lower_sigma1, upper_sigma1-median], [median-lower_sigma2, upper_sigma2-median]] | [
"def",
"compute_lower_upper_errors",
"(",
"sample",
",",
"num_sigma",
"=",
"1",
")",
":",
"if",
"num_sigma",
">",
"3",
":",
"raise",
"ValueError",
"(",
"\"Number of sigma-constraints restircted to three. %s not valid\"",
"%",
"num_sigma",
")",
"num",
"=",
"len",
"(",
"sample",
")",
"num_threshold1",
"=",
"int",
"(",
"round",
"(",
"(",
"num",
"-",
"1",
")",
"*",
"0.833",
")",
")",
"num_threshold2",
"=",
"int",
"(",
"round",
"(",
"(",
"num",
"-",
"1",
")",
"*",
"0.977249868",
")",
")",
"num_threshold3",
"=",
"int",
"(",
"round",
"(",
"(",
"num",
"-",
"1",
")",
"*",
"0.998650102",
")",
")",
"median",
"=",
"np",
".",
"median",
"(",
"sample",
")",
"sorted_sample",
"=",
"np",
".",
"sort",
"(",
"sample",
")",
"if",
"num_sigma",
">",
"0",
":",
"upper_sigma1",
"=",
"sorted_sample",
"[",
"num_threshold1",
"-",
"1",
"]",
"lower_sigma1",
"=",
"sorted_sample",
"[",
"num",
"-",
"num_threshold1",
"-",
"1",
"]",
"else",
":",
"return",
"median",
",",
"[",
"[",
"]",
"]",
"if",
"num_sigma",
">",
"1",
":",
"upper_sigma2",
"=",
"sorted_sample",
"[",
"num_threshold2",
"-",
"1",
"]",
"lower_sigma2",
"=",
"sorted_sample",
"[",
"num",
"-",
"num_threshold2",
"-",
"1",
"]",
"else",
":",
"return",
"median",
",",
"[",
"[",
"median",
"-",
"lower_sigma1",
",",
"upper_sigma1",
"-",
"median",
"]",
"]",
"if",
"num_sigma",
">",
"2",
":",
"upper_sigma3",
"=",
"sorted_sample",
"[",
"num_threshold3",
"-",
"1",
"]",
"lower_sigma3",
"=",
"sorted_sample",
"[",
"num",
"-",
"num_threshold3",
"-",
"1",
"]",
"return",
"median",
",",
"[",
"[",
"median",
"-",
"lower_sigma1",
",",
"upper_sigma1",
"-",
"median",
"]",
",",
"[",
"median",
"-",
"lower_sigma2",
",",
"upper_sigma2",
"-",
"median",
"]",
",",
"[",
"median",
"-",
"lower_sigma3",
",",
"upper_sigma3",
"-",
"median",
"]",
"]",
"else",
":",
"return",
"median",
",",
"[",
"[",
"median",
"-",
"lower_sigma1",
",",
"upper_sigma1",
"-",
"median",
"]",
",",
"[",
"median",
"-",
"lower_sigma2",
",",
"upper_sigma2",
"-",
"median",
"]",
"]"
]
| 43.272727 | 21.454545 |
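`compute_lower_upper_errors` reads asymmetric 1/2/3-sigma intervals directly off the sorted sample instead of assuming a Gaussian shape, which is why it behaves well for skewed distributions. Below is not the original implementation but a shorter percentile-based equivalent plus a usage sketch on a log-normal sample; the Gaussian coverage levels differ slightly from the 0.833 used above for 1 sigma.

import numpy as np

def asymmetric_errors(sample, num_sigma=1):
    # Two-sided quantile levels corresponding to +/-1, 2, 3 sigma of a Gaussian.
    levels = [0.8413447, 0.9772499, 0.9986501][:num_sigma]
    median = np.median(sample)
    errors = []
    for q in levels:
        upper = np.quantile(sample, q)
        lower = np.quantile(sample, 1.0 - q)
        errors.append([median - lower, upper - median])
    return median, errors

# Skewed (log-normal) sample: the upper error bars come out larger than the lower ones.
rng = np.random.default_rng(0)
sample = rng.lognormal(mean=0.0, sigma=0.5, size=100_000)
med, errs = asymmetric_errors(sample, num_sigma=2)
print(med, errs)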