repo (string, 7-55) | path (string, 4-223) | url (string, 87-315) | code (string, 75-104k) | code_tokens (list) | docstring (string, 1-46.9k) | docstring_tokens (list) | language (1 class) | partition (3 values) | avg_line_len (float64, 7.91-980)
---|---|---|---|---|---|---|---|---|---|
lebinh/aq | aq/engines.py | https://github.com/lebinh/aq/blob/eb366dd063db25598daa70a216170776e83383f4/aq/engines.py#L75-L89 | def load_table(self, table):
"""
Load resources as specified by given table into our db.
"""
region = table.database if table.database else self.default_region
resource_name, collection_name = table.table.split('_', 1)
# we use underscore "_" instead of dash "-" for region name but boto3 needs a dash
boto_region_name = region.replace('_', '-')
resource = self.boto3_session.resource(resource_name, region_name=boto_region_name)
if not hasattr(resource, collection_name):
raise QueryError(
'Unknown collection <{0}> of resource <{1}>'.format(collection_name, resource_name))
self.attach_region(region)
self.refresh_table(region, table.table, resource, getattr(resource, collection_name)) | [
"def",
"load_table",
"(",
"self",
",",
"table",
")",
":",
"region",
"=",
"table",
".",
"database",
"if",
"table",
".",
"database",
"else",
"self",
".",
"default_region",
"resource_name",
",",
"collection_name",
"=",
"table",
".",
"table",
".",
"split",
"(",
"'_'",
",",
"1",
")",
"# we use underscore \"_\" instead of dash \"-\" for region name but boto3 need dash",
"boto_region_name",
"=",
"region",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"resource",
"=",
"self",
".",
"boto3_session",
".",
"resource",
"(",
"resource_name",
",",
"region_name",
"=",
"boto_region_name",
")",
"if",
"not",
"hasattr",
"(",
"resource",
",",
"collection_name",
")",
":",
"raise",
"QueryError",
"(",
"'Unknown collection <{0}> of resource <{1}>'",
".",
"format",
"(",
"collection_name",
",",
"resource_name",
")",
")",
"self",
".",
"attach_region",
"(",
"region",
")",
"self",
".",
"refresh_table",
"(",
"region",
",",
"table",
".",
"table",
",",
"resource",
",",
"getattr",
"(",
"resource",
",",
"collection_name",
")",
")"
]
| Load resources as specified by given table into our db. | [
"Load",
"resources",
"as",
"specified",
"by",
"given",
"table",
"into",
"our",
"db",
"."
]
| python | train | 52.533333 |
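The `load_table` row above hinges on two string conventions: table names have the form `<resource>_<collection>`, and region names use underscores that boto3 wants as dashes. A minimal standalone sketch of that parsing (the table names and region below are illustrative assumptions, not taken from the aq project):

```python
# Sketch of the name parsing in load_table above; the example inputs
# ("ec2_instances", "eu_west_1") are assumptions for illustration.

def parse_table_name(table_name, region="us_east_1"):
    """Split '<resource>_<collection>' and convert the region to boto3 form."""
    resource_name, collection_name = table_name.split("_", 1)
    boto_region_name = region.replace("_", "-")  # boto3 needs a dash
    return resource_name, collection_name, boto_region_name

print(parse_table_name("ec2_instances"))
# -> ('ec2', 'instances', 'us-east-1')
print(parse_table_name("s3_buckets", region="eu_west_1"))
# -> ('s3', 'buckets', 'eu-west-1')
```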
pytroll/satpy | satpy/readers/viirs_sdr.py | https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/viirs_sdr.py#L60-L92 | def _get_invalid_info(granule_data):
"""Get a detailed report of the missing data.
N/A: not applicable
MISS: required value missing at time of processing
OBPT: onboard pixel trim (overlapping/bow-tie pixel removed during
SDR processing)
OGPT: on-ground pixel trim (overlapping/bow-tie pixel removed
during EDR processing)
ERR: error occurred during processing / non-convergence
ELINT: ellipsoid intersect failed / instrument line-of-sight does
not intersect the Earth’s surface
VDNE: value does not exist / processing algorithm did not execute
SOUB: scaled out-of-bounds / solution not within allowed range
"""
if issubclass(granule_data.dtype.type, np.integer):
msg = ("na:" + str((granule_data == 65535).sum()) +
" miss:" + str((granule_data == 65534).sum()) +
" obpt:" + str((granule_data == 65533).sum()) +
" ogpt:" + str((granule_data == 65532).sum()) +
" err:" + str((granule_data == 65531).sum()) +
" elint:" + str((granule_data == 65530).sum()) +
" vdne:" + str((granule_data == 65529).sum()) +
" soub:" + str((granule_data == 65528).sum()))
elif issubclass(granule_data.dtype.type, np.floating):
msg = ("na:" + str((granule_data == -999.9).sum()) +
" miss:" + str((granule_data == -999.8).sum()) +
" obpt:" + str((granule_data == -999.7).sum()) +
" ogpt:" + str((granule_data == -999.6).sum()) +
" err:" + str((granule_data == -999.5).sum()) +
" elint:" + str((granule_data == -999.4).sum()) +
" vdne:" + str((granule_data == -999.3).sum()) +
" soub:" + str((granule_data == -999.2).sum()))
return msg | [
"def",
"_get_invalid_info",
"(",
"granule_data",
")",
":",
"if",
"issubclass",
"(",
"granule_data",
".",
"dtype",
".",
"type",
",",
"np",
".",
"integer",
")",
":",
"msg",
"=",
"(",
"\"na:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"65535",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" miss:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"65534",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" obpt:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"65533",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" ogpt:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"65532",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" err:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"65531",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" elint:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"65530",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" vdne:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"65529",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" soub:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"65528",
")",
".",
"sum",
"(",
")",
")",
")",
"elif",
"issubclass",
"(",
"granule_data",
".",
"dtype",
".",
"type",
",",
"np",
".",
"floating",
")",
":",
"msg",
"=",
"(",
"\"na:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"-",
"999.9",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" miss:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"-",
"999.8",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" obpt:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"-",
"999.7",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" ogpt:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"-",
"999.6",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" err:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"-",
"999.5",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" elint:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"-",
"999.4",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" vdne:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"-",
"999.3",
")",
".",
"sum",
"(",
")",
")",
"+",
"\" soub:\"",
"+",
"str",
"(",
"(",
"granule_data",
"==",
"-",
"999.2",
")",
".",
"sum",
"(",
")",
")",
")",
"return",
"msg"
]
| Get a detailed report of the missing data.
N/A: not applicable
MISS: required value missing at time of processing
OBPT: onboard pixel trim (overlapping/bow-tie pixel removed during
SDR processing)
OGPT: on-ground pixel trim (overlapping/bow-tie pixel removed
during EDR processing)
ERR: error occurred during processing / non-convergence
ELINT: ellipsoid intersect failed / instrument line-of-sight does
not intersect the Earth’s surface
VDNE: value does not exist / processing algorithm did not execute
SOUB: scaled out-of-bounds / solution not within allowed range | [
"Get",
"a",
"detailed",
"report",
"of",
"the",
"missing",
"data",
".",
"N",
"/",
"A",
":",
"not",
"applicable",
"MISS",
":",
"required",
"value",
"missing",
"at",
"time",
"of",
"processing",
"OBPT",
":",
"onboard",
"pixel",
"trim",
"(",
"overlapping",
"/",
"bow",
"-",
"tie",
"pixel",
"removed",
"during",
"SDR",
"processing",
")",
"OGPT",
":",
"on",
"-",
"ground",
"pixel",
"trim",
"(",
"overlapping",
"/",
"bow",
"-",
"tie",
"pixel",
"removed",
"during",
"EDR",
"processing",
")",
"ERR",
":",
"error",
"occurred",
"during",
"processing",
"/",
"non",
"-",
"convergence",
"ELINT",
":",
"ellipsoid",
"intersect",
"failed",
"/",
"instrument",
"line",
"-",
"of",
"-",
"sight",
"does",
"not",
"intersect",
"the",
"Earth’s",
"surface",
"VDNE",
":",
"value",
"does",
"not",
"exist",
"/",
"processing",
"algorithm",
"did",
"not",
"execute",
"SOUB",
":",
"scaled",
"out",
"-",
"of",
"-",
"bounds",
"/",
"solution",
"not",
"within",
"allowed",
"range"
]
| python | train | 55.272727 |
NASA-AMMOS/AIT-Core | ait/core/seq.py | https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/seq.py#L428-L430 | def encode (self):
"""Encodes this SeqCmd to binary and returns a bytearray."""
return self.attrs.encode() + self.delay.encode() + self.cmd.encode() | [
"def",
"encode",
"(",
"self",
")",
":",
"return",
"self",
".",
"attrs",
".",
"encode",
"(",
")",
"+",
"self",
".",
"delay",
".",
"encode",
"(",
")",
"+",
"self",
".",
"cmd",
".",
"encode",
"(",
")"
]
| Encodes this SeqCmd to binary and returns a bytearray. | [
"Encodes",
"this",
"SeqCmd",
"to",
"binary",
"and",
"returns",
"a",
"bytearray",
"."
]
| python | train | 51.333333 |
google/grumpy | third_party/pypy/datetime.py | https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pypy/datetime.py#L1535-L1544 | def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
offset = self._utcoffset()
if offset: # neither None nor 0
mm -= offset
y, m, d, hh, mm, ss, _ = _normalize_datetime(
y, m, d, hh, mm, ss, 0, ignore_overflow=True)
return _build_struct_time(y, m, d, hh, mm, ss, 0) | [
"def",
"utctimetuple",
"(",
"self",
")",
":",
"y",
",",
"m",
",",
"d",
"=",
"self",
".",
"year",
",",
"self",
".",
"month",
",",
"self",
".",
"day",
"hh",
",",
"mm",
",",
"ss",
"=",
"self",
".",
"hour",
",",
"self",
".",
"minute",
",",
"self",
".",
"second",
"offset",
"=",
"self",
".",
"_utcoffset",
"(",
")",
"if",
"offset",
":",
"# neither None nor 0",
"mm",
"-=",
"offset",
"y",
",",
"m",
",",
"d",
",",
"hh",
",",
"mm",
",",
"ss",
",",
"_",
"=",
"_normalize_datetime",
"(",
"y",
",",
"m",
",",
"d",
",",
"hh",
",",
"mm",
",",
"ss",
",",
"0",
",",
"ignore_overflow",
"=",
"True",
")",
"return",
"_build_struct_time",
"(",
"y",
",",
"m",
",",
"d",
",",
"hh",
",",
"mm",
",",
"ss",
",",
"0",
")"
]
| Return UTC time tuple compatible with time.gmtime(). | [
"Return",
"UTC",
"time",
"tuple",
"compatible",
"with",
"time",
".",
"gmtime",
"()",
"."
]
| python | valid | 46.3 |
yunojuno/elasticsearch-django | elasticsearch_django/models.py | https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/models.py#L257-L306 | def as_search_document_update(self, *, index, update_fields):
"""
Return a partial update document based on which fields have been updated.
If an object is saved with the `update_fields` argument passed
through, then it is assumed that this is a 'partial update'. In
this scenario we need a {property: value} dictionary containing
just the fields we want to update.
This method handles two possible update strategies - 'full' or 'partial'.
The default 'full' strategy simply returns the value of `as_search_document`
- thereby replacing the entire document each time. The 'partial' strategy is
more intelligent - it will determine whether the fields passed are in the
search document mapping, and return a partial update document that contains
only those that are. In addition, if any field that _is_ included cannot
be automatically serialized (e.g. a RelatedField object), then this method
will raise a ValueError. In this scenario, you should override this method
in your subclass.
>>> def as_search_document_update(self, index, update_fields):
... if 'user' in update_fields:
... update_fields.remove('user')
... doc = super().as_search_document_update(index, update_fields)
... doc['user'] = self.user.get_full_name()
... return doc
... return super().as_search_document_update(index, update_fields)
You may also wish to subclass this method to perform field-specific logic
- in this example if only the timestamp is being saved, then ignore the
update if the timestamp is later than a certain time.
>>> def as_search_document_update(self, index, update_fields):
... if update_fields == ['timestamp']:
... if self.timestamp > today():
... return {}
... return super().as_search_document_update(index, update_fields)
"""
if UPDATE_STRATEGY == UPDATE_STRATEGY_FULL:
return self.as_search_document(index=index)
if UPDATE_STRATEGY == UPDATE_STRATEGY_PARTIAL:
# in partial mode we update the intersection of update_fields and
# properties found in the mapping file.
return {
k: getattr(self, k)
for k in self.clean_update_fields(
index=index, update_fields=update_fields
)
} | [
"def",
"as_search_document_update",
"(",
"self",
",",
"*",
",",
"index",
",",
"update_fields",
")",
":",
"if",
"UPDATE_STRATEGY",
"==",
"UPDATE_STRATEGY_FULL",
":",
"return",
"self",
".",
"as_search_document",
"(",
"index",
"=",
"index",
")",
"if",
"UPDATE_STRATEGY",
"==",
"UPDATE_STRATEGY_PARTIAL",
":",
"# in partial mode we update the intersection of update_fields and",
"# properties found in the mapping file.",
"return",
"{",
"k",
":",
"getattr",
"(",
"self",
",",
"k",
")",
"for",
"k",
"in",
"self",
".",
"clean_update_fields",
"(",
"index",
"=",
"index",
",",
"update_fields",
"=",
"update_fields",
")",
"}"
]
| Return a partial update document based on which fields have been updated.
If an object is saved with the `update_fields` argument passed
through, then it is assumed that this is a 'partial update'. In
this scenario we need a {property: value} dictionary containing
just the fields we want to update.
This method handles two possible update strategies - 'full' or 'partial'.
The default 'full' strategy simply returns the value of `as_search_document`
- thereby replacing the entire document each time. The 'partial' strategy is
more intelligent - it will determine whether the fields passed are in the
search document mapping, and return a partial update document that contains
only those that are. In addition, if any field that _is_ included cannot
be automatically serialized (e.g. a RelatedField object), then this method
will raise a ValueError. In this scenario, you should override this method
in your subclass.
>>> def as_search_document_update(self, index, update_fields):
... if 'user' in update_fields:
... update_fields.remove('user')
... doc = super().as_search_document_update(index, update_fields)
... doc['user'] = self.user.get_full_name()
... return doc
... return super().as_search_document_update(index, update_fields)
You may also wish to subclass this method to perform field-specific logic
- in this example if only the timestamp is being saved, then ignore the
update if the timestamp is later than a certain time.
>>> def as_search_document_update(self, index, update_fields):
... if update_fields == ['timestamp']:
... if self.timestamp > today():
... return {}
... return super().as_search_document_update(index, update_fields) | [
"Return",
"a",
"partial",
"update",
"document",
"based",
"on",
"which",
"fields",
"have",
"been",
"updated",
"."
]
| python | train | 49.82 |
aiortc/aioice | aioice/turn.py | https://github.com/aiortc/aioice/blob/a04d810d94ec2d00eca9ce01eacca74b3b086616/aioice/turn.py#L161-L175 | async def request(self, request):
"""
Execute a STUN transaction and return the response.
"""
assert request.transaction_id not in self.transactions
if self.integrity_key:
self.__add_authentication(request)
transaction = stun.Transaction(request, self.server, self)
self.transactions[request.transaction_id] = transaction
try:
return await transaction.run()
finally:
del self.transactions[request.transaction_id] | [
"async",
"def",
"request",
"(",
"self",
",",
"request",
")",
":",
"assert",
"request",
".",
"transaction_id",
"not",
"in",
"self",
".",
"transactions",
"if",
"self",
".",
"integrity_key",
":",
"self",
".",
"__add_authentication",
"(",
"request",
")",
"transaction",
"=",
"stun",
".",
"Transaction",
"(",
"request",
",",
"self",
".",
"server",
",",
"self",
")",
"self",
".",
"transactions",
"[",
"request",
".",
"transaction_id",
"]",
"=",
"transaction",
"try",
":",
"return",
"await",
"transaction",
".",
"run",
"(",
")",
"finally",
":",
"del",
"self",
".",
"transactions",
"[",
"request",
".",
"transaction_id",
"]"
]
| Execute a STUN transaction and return the response. | [
"Execute",
"a",
"STUN",
"transaction",
"and",
"return",
"the",
"response",
"."
]
| python | train | 33.866667 |
peshay/tpm | tpm.py | https://github.com/peshay/tpm/blob/8e64a4d8b89d54bdd2c92d965463a7508aa3d0bc/tpm.py#L263-L267 | def list_projects_search(self, searchstring):
"""List projects with searchstring."""
log.debug('List all projects with: %s' % searchstring)
return self.collection('projects/search/%s.json' %
quote_plus(searchstring)) | [
"def",
"list_projects_search",
"(",
"self",
",",
"searchstring",
")",
":",
"log",
".",
"debug",
"(",
"'List all projects with: %s'",
"%",
"searchstring",
")",
"return",
"self",
".",
"collection",
"(",
"'projects/search/%s.json'",
"%",
"quote_plus",
"(",
"searchstring",
")",
")"
]
| List projects with searchstring. | [
"List",
"projects",
"with",
"searchstring",
"."
]
| python | train | 53.4 |
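The `list_projects_search` row above URL-encodes the search string with `quote_plus` before splicing it into the endpoint path. A quick standalone look at what that encoding does (the search term is a made-up example):

```python
from urllib.parse import quote_plus

# quote_plus encodes spaces as '+' and reserved characters as %XX escapes.
print('projects/search/%s.json' % quote_plus('salt & pepper'))
# -> projects/search/salt+%26+pepper.json
```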
aarongarrett/inspyred | docs/moonshot.py | https://github.com/aarongarrett/inspyred/blob/d5976ab503cc9d51c6f586cbb7bb601a38c01128/docs/moonshot.py#L37-L50 | def gravitational_force(position_a, mass_a, position_b, mass_b):
"""Returns the gravitational force between the two bodies a and b."""
distance = distance_between(position_a, position_b)
# Calculate the direction and magnitude of the force.
angle = math.atan2(position_a[1] - position_b[1], position_a[0] - position_b[0])
magnitude = G * mass_a * mass_b / (distance**2)
# Find the x and y components of the force.
# Determine sign based on which one is the larger body.
sign = -1 if mass_b > mass_a else 1
x_force = sign * magnitude * math.cos(angle)
y_force = sign * magnitude * math.sin(angle)
return x_force, y_force | [
"def",
"gravitational_force",
"(",
"position_a",
",",
"mass_a",
",",
"position_b",
",",
"mass_b",
")",
":",
"distance",
"=",
"distance_between",
"(",
"position_a",
",",
"position_b",
")",
"# Calculate the direction and magnitude of the force.",
"angle",
"=",
"math",
".",
"atan2",
"(",
"position_a",
"[",
"1",
"]",
"-",
"position_b",
"[",
"1",
"]",
",",
"position_a",
"[",
"0",
"]",
"-",
"position_b",
"[",
"0",
"]",
")",
"magnitude",
"=",
"G",
"*",
"mass_a",
"*",
"mass_b",
"/",
"(",
"distance",
"**",
"2",
")",
"# Find the x and y components of the force.",
"# Determine sign based on which one is the larger body.",
"sign",
"=",
"-",
"1",
"if",
"mass_b",
">",
"mass_a",
"else",
"1",
"x_force",
"=",
"sign",
"*",
"magnitude",
"*",
"math",
".",
"cos",
"(",
"angle",
")",
"y_force",
"=",
"sign",
"*",
"magnitude",
"*",
"math",
".",
"sin",
"(",
"angle",
")",
"return",
"x_force",
",",
"y_force"
]
| Returns the gravitational force between the two bodies a and b. | [
"Returns",
"the",
"gravitational",
"force",
"between",
"the",
"two",
"bodies",
"a",
"and",
"b",
"."
]
| python | train | 46.571429 |
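The `gravitational_force` row above combines Newton's law with an `atan2`-derived direction and a sign flip toward the heavier body. A self-contained worked example (the value of `G`, the `distance_between` helper, and the masses and positions are assumptions supplied here so the sketch runs on its own):

```python
import math

G = 6.674e-11  # gravitational constant, m^3 kg^-1 s^-2 (assumed value)

def distance_between(a, b):
    # Euclidean distance; stands in for the module's own helper.
    return math.hypot(a[0] - b[0], a[1] - b[1])

def gravitational_force(position_a, mass_a, position_b, mass_b):
    distance = distance_between(position_a, position_b)
    angle = math.atan2(position_a[1] - position_b[1],
                       position_a[0] - position_b[0])
    magnitude = G * mass_a * mass_b / (distance ** 2)
    sign = -1 if mass_b > mass_a else 1
    return (sign * magnitude * math.cos(angle),
            sign * magnitude * math.sin(angle))

# Force on a 1000 kg body sitting 1 km from a much heavier one at the origin:
fx, fy = gravitational_force((1000.0, 0.0), 1000.0, (0.0, 0.0), 5.0e10)
print(fx, fy)  # -> about (-0.00334, -0.0): pull toward the heavier body
```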
KarchinLab/probabilistic2020 | prob2020/python/indel.py | https://github.com/KarchinLab/probabilistic2020/blob/5d70583b0a7c07cfe32e95f3a70e05df412acb84/prob2020/python/indel.py#L213-L243 | def is_frameshift_len(mut_df):
"""Simply returns a series indicating whether each corresponding mutation
is a frameshift.
This is based on the length of the indel. Thus it may be fooled by frameshifts
at exon-intron boundaries or other odd cases.
Parameters
----------
mut_df : pd.DataFrame
mutation input file as a dataframe in standard format
Returns
-------
is_fs : pd.Series
pandas series indicating if mutations are frameshifts
"""
# calculate length, 0-based coordinates
#indel_len = mut_df['End_Position'] - mut_df['Start_Position']
if 'indel len' in mut_df.columns:
indel_len = mut_df['indel len']
else:
indel_len = compute_indel_length(mut_df)
# only non multiples of 3 are frameshifts
is_fs = (indel_len%3)>0
# make sure no single base substitutions are counted
is_indel = (mut_df['Reference_Allele']=='-') | (mut_df['Tumor_Allele']=='-')
is_fs[~is_indel] = False
return is_fs | [
"def",
"is_frameshift_len",
"(",
"mut_df",
")",
":",
"# calculate length, 0-based coordinates",
"#indel_len = mut_df['End_Position'] - mut_df['Start_Position']",
"if",
"'indel len'",
"in",
"mut_df",
".",
"columns",
":",
"indel_len",
"=",
"mut_df",
"[",
"'indel len'",
"]",
"else",
":",
"indel_len",
"=",
"compute_indel_length",
"(",
"mut_df",
")",
"# only non multiples of 3 are frameshifts",
"is_fs",
"=",
"(",
"indel_len",
"%",
"3",
")",
">",
"0",
"# make sure no single base substitutions are counted",
"is_indel",
"=",
"(",
"mut_df",
"[",
"'Reference_Allele'",
"]",
"==",
"'-'",
")",
"|",
"(",
"mut_df",
"[",
"'Tumor_Allele'",
"]",
"==",
"'-'",
")",
"is_fs",
"[",
"~",
"is_indel",
"]",
"=",
"False",
"return",
"is_fs"
]
| Simply returns a series indicating whether each corresponding mutation
is a frameshift.
This is based on the length of the indel. Thus it may be fooled by frameshifts
at exon-intron boundaries or other odd cases.
Parameters
----------
mut_df : pd.DataFrame
mutation input file as a dataframe in standard format
Returns
-------
is_fs : pd.Series
pandas series indicating if mutations are frameshifts
"Simply",
"returns",
"a",
"series",
"indicating",
"whether",
"each",
"corresponding",
"mutation",
"is",
"a",
"frameshift",
"."
]
| python | train | 31.419355 |
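The `is_frameshift_len` row above flags indels whose length is not a multiple of 3, then masks out substitutions. A small hedged demo on a toy mutation table (the column values are invented, and a precomputed `indel len` column stands in for `compute_indel_length` so the sketch is self-contained):

```python
import pandas as pd

# Toy mutation table: a 2 bp deletion, a 3 bp insertion, and a SNV.
mut_df = pd.DataFrame({
    "Reference_Allele": ["AT", "-", "C"],
    "Tumor_Allele": ["-", "GGG", "T"],
    "indel len": [2, 3, 0],  # precomputed here so the sketch stands alone
})

def is_frameshift_len(mut_df):
    indel_len = mut_df["indel len"]
    is_fs = (indel_len % 3) > 0          # non-multiples of 3 shift the frame
    is_indel = ((mut_df["Reference_Allele"] == "-") |
                (mut_df["Tumor_Allele"] == "-"))
    is_fs[~is_indel] = False             # never count substitutions
    return is_fs

print(is_frameshift_len(mut_df).tolist())  # -> [True, False, False]
```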
inspirehep/inspire-schemas | inspire_schemas/readers/literature.py | https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/readers/literature.py#L351-L382 | def is_published(self):
"""Return True if a record is published.
We say that a record is published if it is citeable, which means that
it has enough information in a ``publication_info``, or if we know its
DOI and a ``journal_title``, which means it is in press.
Returns:
bool: whether the record is published.
Examples:
>>> record = {
... 'dois': [
... {'value': '10.1016/0029-5582(61)90469-2'},
... ],
... 'publication_info': [
... {'journal_title': 'Nucl.Phys.'},
... ],
... }
>>> LiteratureReader(record).is_published
True
"""
citeable = 'publication_info' in self.record and \
is_citeable(self.record['publication_info'])
submitted = 'dois' in self.record and any(
'journal_title' in el for el in
force_list(self.record.get('publication_info'))
)
return citeable or submitted | [
"def",
"is_published",
"(",
"self",
")",
":",
"citeable",
"=",
"'publication_info'",
"in",
"self",
".",
"record",
"and",
"is_citeable",
"(",
"self",
".",
"record",
"[",
"'publication_info'",
"]",
")",
"submitted",
"=",
"'dois'",
"in",
"self",
".",
"record",
"and",
"any",
"(",
"'journal_title'",
"in",
"el",
"for",
"el",
"in",
"force_list",
"(",
"self",
".",
"record",
".",
"get",
"(",
"'publication_info'",
")",
")",
")",
"return",
"citeable",
"or",
"submitted"
]
| Return True if a record is published.
We say that a record is published if it is citeable, which means that
it has enough information in a ``publication_info``, or if we know its
DOI and a ``journal_title``, which means it is in press.
Returns:
bool: whether the record is published.
Examples:
>>> record = {
... 'dois': [
... {'value': '10.1016/0029-5582(61)90469-2'},
... ],
... 'publication_info': [
... {'journal_title': 'Nucl.Phys.'},
... ],
... }
>>> LiteratureReader(record).is_published
True | [
"Return",
"True",
"if",
"a",
"record",
"is",
"published",
"."
]
| python | train | 32.59375 |
waqasbhatti/astrobase | astrobase/hatsurveys/hatlc.py | https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/hatsurveys/hatlc.py#L456-L479 | def _pyuncompress_sqlitecurve(sqlitecurve, force=False):
'''This just uncompresses the sqlitecurve. Should be independent of OS.
'''
outfile = sqlitecurve.replace('.gz','')
try:
if os.path.exists(outfile) and not force:
return outfile
else:
with gzip.open(sqlitecurve,'rb') as infd:
with open(outfile,'wb') as outfd:
shutil.copyfileobj(infd, outfd)
# do not remove the input file yet
if os.path.exists(outfile):
return outfile
except Exception as e:
return None | [
"def",
"_pyuncompress_sqlitecurve",
"(",
"sqlitecurve",
",",
"force",
"=",
"False",
")",
":",
"outfile",
"=",
"sqlitecurve",
".",
"replace",
"(",
"'.gz'",
",",
"''",
")",
"try",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"outfile",
")",
"and",
"not",
"force",
":",
"return",
"outfile",
"else",
":",
"with",
"gzip",
".",
"open",
"(",
"sqlitecurve",
",",
"'rb'",
")",
"as",
"infd",
":",
"with",
"open",
"(",
"outfile",
",",
"'wb'",
")",
"as",
"outfd",
":",
"shutil",
".",
"copyfileobj",
"(",
"infd",
",",
"outfd",
")",
"# do not remove the intput file yet",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"outfile",
")",
":",
"return",
"outfile",
"except",
"Exception",
"as",
"e",
":",
"return",
"None"
]
| This just uncompresses the sqlitecurve. Should be independent of OS. | [
"This",
"just",
"uncompresses",
"the",
"sqlitecurve",
".",
"Should",
"be",
"independent",
"of",
"OS",
"."
]
| python | valid | 24.625 |
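The `_pyuncompress_sqlitecurve` row above is a thin wrapper around `gzip.open` plus `shutil.copyfileobj`. A self-contained round trip of that decompression step (the payload and temp paths are assumptions created on the fly):

```python
import gzip
import os
import shutil
import tempfile

workdir = tempfile.mkdtemp()
gz_path = os.path.join(workdir, "curve.sqlite.gz")
with gzip.open(gz_path, "wb") as fd:
    fd.write(b"pretend sqlite bytes")  # stand-in payload

# The same decompression pattern as in the function above.
out_path = gz_path.replace(".gz", "")
with gzip.open(gz_path, "rb") as infd:
    with open(out_path, "wb") as outfd:
        shutil.copyfileobj(infd, outfd)

with open(out_path, "rb") as fd:
    print(fd.read())  # -> b'pretend sqlite bytes'
```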
quantopian/zipline | zipline/finance/ledger.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/finance/ledger.py#L114-L139 | def handle_splits(self, splits):
"""Processes a list of splits by modifying any positions as needed.
Parameters
----------
splits: list
A list of splits. Each split is a tuple of (asset, ratio).
Returns
-------
int: The leftover cash from fractional shares after modifying each
position.
"""
total_leftover_cash = 0
for asset, ratio in splits:
if asset in self.positions:
self._dirty_stats = True
# Make the position object handle the split. It returns the
# leftover cash from a fractional share, if there is any.
position = self.positions[asset]
leftover_cash = position.handle_split(asset, ratio)
total_leftover_cash += leftover_cash
return total_leftover_cash | [
"def",
"handle_splits",
"(",
"self",
",",
"splits",
")",
":",
"total_leftover_cash",
"=",
"0",
"for",
"asset",
",",
"ratio",
"in",
"splits",
":",
"if",
"asset",
"in",
"self",
".",
"positions",
":",
"self",
".",
"_dirty_stats",
"=",
"True",
"# Make the position object handle the split. It returns the",
"# leftover cash from a fractional share, if there is any.",
"position",
"=",
"self",
".",
"positions",
"[",
"asset",
"]",
"leftover_cash",
"=",
"position",
".",
"handle_split",
"(",
"asset",
",",
"ratio",
")",
"total_leftover_cash",
"+=",
"leftover_cash",
"return",
"total_leftover_cash"
]
| Processes a list of splits by modifying any positions as needed.
Parameters
----------
splits: list
A list of splits. Each split is a tuple of (asset, ratio).
Returns
-------
int: The leftover cash from fractional shares after modifying each
position. | [
"Processes",
"a",
"list",
"of",
"splits",
"by",
"modifying",
"any",
"positions",
"as",
"needed",
"."
]
| python | train | 33.230769 |
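The `handle_splits` row above iterates (asset, ratio) pairs, lets each position object apply the split, and accumulates the cash returned for fractional shares. A toy version of that accumulation with a stub position class, not zipline's real `Position` (the ratio convention here, new price divided by old price, and all numbers are assumptions):

```python
class StubPosition:
    """Stand-in for a real position; only models the leftover-cash return."""
    def __init__(self, amount, price):
        self.amount, self.price = amount, price

    def handle_split(self, asset, ratio):
        # New share count is amount / ratio; the fractional remainder is
        # cashed out at the post-split price (a simplification).
        raw = self.amount / ratio
        self.amount = int(raw)
        self.price *= ratio
        return (raw - self.amount) * self.price

positions = {"AAPL": StubPosition(amount=10, price=300.0)}

total_leftover_cash = 0.0
for asset, ratio in [("AAPL", 0.3)]:  # hypothetical split ratio
    if asset in positions:
        total_leftover_cash += positions[asset].handle_split(asset, ratio)

print(positions["AAPL"].amount, round(total_leftover_cash, 2))
# -> 33 30.0: ten shares become 33 whole shares plus 30.0 in cash
```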
ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/external_ca/apis/certificate_issuers_api.py | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/external_ca/apis/certificate_issuers_api.py#L234-L253 | def get_certificate_issuer(self, certificate_issuer_id, **kwargs): # noqa: E501
"""Get certificate issuer by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_certificate_issuer(certificate_issuer_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str certificate_issuer_id: Certificate issuer ID. The ID of the certificate issuer. (required)
:return: CertificateIssuerInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs) # noqa: E501
else:
(data) = self.get_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs) # noqa: E501
return data | [
"def",
"get_certificate_issuer",
"(",
"self",
",",
"certificate_issuer_id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'asynchronous'",
")",
":",
"return",
"self",
".",
"get_certificate_issuer_with_http_info",
"(",
"certificate_issuer_id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"get_certificate_issuer_with_http_info",
"(",
"certificate_issuer_id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
]
| Get certificate issuer by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_certificate_issuer(certificate_issuer_id, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str certificate_issuer_id: Certificate issuer ID. The ID of the certificate issuer. (required)
:return: CertificateIssuerInfo
If the method is called asynchronously,
returns the request thread. | [
"Get",
"certificate",
"issuer",
"by",
"ID",
".",
"#",
"noqa",
":",
"E501"
]
| python | train | 51.25 |
townsenddw/jhubctl | jhubctl/clusters/providers/aws/aws.py | https://github.com/townsenddw/jhubctl/blob/c8c20f86a16e9d01dd90e4607d81423417cc773b/jhubctl/clusters/providers/aws/aws.py#L290-L296 | def delete_stack(self, stack_name):
"""Teardown a stack."""
get_stack(stack_name)
CLIENT.delete_stack(
StackName=stack_name
)
DELETE_WAITER.wait(StackName=stack_name) | [
"def",
"delete_stack",
"(",
"self",
",",
"stack_name",
")",
":",
"get_stack",
"(",
"stack_name",
")",
"CLIENT",
".",
"delete_stack",
"(",
"StackName",
"=",
"stack_name",
")",
"DELETE_WAITER",
".",
"wait",
"(",
"StackName",
"=",
"stack_name",
")"
]
| Teardown a stack. | [
"Teardown",
"a",
"stack",
"."
]
| python | train | 30.285714 |
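The `delete_stack` row above pairs the CloudFormation delete call with a waiter so the teardown blocks until the stack is actually gone. The same pattern in plain boto3, assuming credentials are already configured; the stack name is a placeholder:

```python
import boto3

client = boto3.client("cloudformation")
stack_name = "my-jupyterhub-stack"  # placeholder name, an assumption

client.delete_stack(StackName=stack_name)
# Block until CloudFormation reports the deletion is complete.
client.get_waiter("stack_delete_complete").wait(StackName=stack_name)
```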
python-diamond/Diamond | src/collectors/xfs/xfs.py | https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/xfs/xfs.py#L35-L263 | def collect(self):
"""
Collect xfs stats.
For an explanation of the following metrics visit
http://xfs.org/index.php/Runtime_Stats
https://github.com/torvalds/linux/blob/master/fs/xfs/xfs_stats.h
"""
data_structure = {
'extent_alloc': (
'alloc_extent',
'alloc_block',
'free_extent',
'free_block'
),
'abt': (
'lookup',
'compare',
'insrec',
'delrec'
),
'blk_map': (
'read_ops',
'write_ops',
'unmap',
'add_exlist',
'del_exlist',
'look_exlist',
'cmp_exlist'
),
'bmbt': (
'lookup',
'compare',
'insrec',
'delrec'
),
'dir': (
'lookup',
'create',
'remove',
'getdents'
),
'trans': (
'sync',
'async',
'empty'
),
'ig': (
'ig_attempts',
'ig_found',
'ig_frecycle',
'ig_missed',
'ig_dup',
'ig_reclaims',
'ig_attrchg'
),
'log': (
'writes',
'blocks',
'noiclogs',
'force',
'force_sleep'
),
'push_ail': (
'try_logspace',
'sleep_logspace',
'pushes',
'success',
'pushbuf',
'pinned',
'locked',
'flushing',
'restarts',
'flush'
),
'xstrat': (
'quick',
'split'
),
'rw': (
'write_calls',
'read_calls'
),
'attr': (
'get',
'set',
'remove',
'list'
),
'icluster': (
'iflush_count',
'icluster_flushcnt',
'icluster_flushinode'
),
'vnodes': (
'vn_active',
'vn_alloc',
'vn_get',
'vn_hold',
'vn_rele',
'vn_reclaim',
'vn_remove',
'vn_free'
),
'buf': (
'xb_get',
'xb_create',
'xb_get_locked',
'xb_get_locked_waited',
'xb_busy_locked',
'xb_miss_locked',
'xb_page_retries',
'xb_page_found',
'xb_get_read'
),
'abtb2': (
'xs_abtb_2_lookup',
'xs_abtb_2_compare',
'xs_abtb_2_insrec',
'xs_abtb_2_delrec',
'xs_abtb_2_newroot',
'xs_abtb_2_killroot',
'xs_abtb_2_increment',
'xs_abtb_2_decrement',
'xs_abtb_2_lshift',
'xs_abtb_2_rshift',
'xs_abtb_2_split',
'xs_abtb_2_join',
'xs_abtb_2_alloc',
'xs_abtb_2_free',
'xs_abtb_2_moves'
),
'abtc2': (
'xs_abtc_2_lookup',
'xs_abtc_2_compare',
'xs_abtc_2_insrec',
'xs_abtc_2_delrec',
'xs_abtc_2_newroot',
'xs_abtc_2_killroot',
'xs_abtc_2_increment',
'xs_abtc_2_decrement',
'xs_abtc_2_lshift',
'xs_abtc_2_rshift',
'xs_abtc_2_split',
'xs_abtc_2_join',
'xs_abtc_2_alloc',
'xs_abtc_2_free',
'xs_abtc_2_moves'
),
'bmbt2': (
'xs_bmbt_2_lookup',
'xs_bmbt_2_compare',
'xs_bmbt_2_insrec',
'xs_bmbt_2_delrec',
'xs_bmbt_2_newroot',
'xs_bmbt_2_killroot',
'xs_bmbt_2_increment',
'xs_bmbt_2_decrement',
'xs_bmbt_2_lshift',
'xs_bmbt_2_rshift',
'xs_bmbt_2_split',
'xs_bmbt_2_join',
'xs_bmbt_2_alloc',
'xs_bmbt_2_free',
'xs_bmbt_2_moves'
),
'ibt2': (
'lookup',
'compare',
'insrec',
'delrec',
'newroot',
'killroot',
'increment',
'decrement',
'lshift',
'rshift',
'split',
'join',
'alloc',
'free',
'moves'
),
'fibt2': (
'lookup',
'compare',
'insrec',
'delrec',
'newroot',
'killroot',
'increment',
'decrement',
'lshift',
'rshift',
'split',
'join',
'alloc',
'free',
'moves'
),
'qm': (
'xs_qm_dquot',
'xs_qm_dquot_unused'
),
'xpc': (
'xs_xstrat_bytes',
'xs_write_bytes',
'xs_read_bytes'
),
'debug': (
'debug',
)
}
f = open(self.PROC)
new_stats = f.readlines()
f.close()
stats = {}
for line in new_stats:
items = line.rstrip().split()
stats[items[0]] = [int(a) for a in items[1:]]
for key in stats.keys():
for item in enumerate(data_structure[key]):
metric_name = '.'.join([key, item[1]])
value = stats[key][item[0]]
self.publish_counter(metric_name, value) | [
"def",
"collect",
"(",
"self",
")",
":",
"data_structure",
"=",
"{",
"'extent_alloc'",
":",
"(",
"'alloc_extent'",
",",
"'alloc_block'",
",",
"'free_extent'",
",",
"'free_block'",
")",
",",
"'abt'",
":",
"(",
"'lookup'",
",",
"'compare'",
",",
"'insrec'",
",",
"'delrec'",
")",
",",
"'blk_map'",
":",
"(",
"'read_ops'",
",",
"'write_ops'",
",",
"'unmap'",
",",
"'add_exlist'",
",",
"'del_exlist'",
",",
"'look_exlist'",
",",
"'cmp_exlist'",
")",
",",
"'bmbt'",
":",
"(",
"'lookup'",
",",
"'compare'",
",",
"'insrec'",
",",
"'delrec'",
")",
",",
"'dir'",
":",
"(",
"'lookup'",
",",
"'create'",
",",
"'remove'",
",",
"'getdents'",
")",
",",
"'trans'",
":",
"(",
"'sync'",
",",
"'async'",
",",
"'empty'",
")",
",",
"'ig'",
":",
"(",
"'ig_attempts'",
",",
"'ig_found'",
",",
"'ig_frecycle'",
",",
"'ig_missed'",
",",
"'ig_dup'",
",",
"'ig_reclaims'",
",",
"'ig_attrchg'",
")",
",",
"'log'",
":",
"(",
"'writes'",
",",
"'blocks'",
",",
"'noiclogs'",
",",
"'force'",
",",
"'force_sleep'",
")",
",",
"'push_ail'",
":",
"(",
"'try_logspace'",
",",
"'sleep_logspace'",
",",
"'pushes'",
",",
"'success'",
",",
"'pushbuf'",
",",
"'pinned'",
",",
"'locked'",
",",
"'flushing'",
",",
"'restarts'",
",",
"'flush'",
")",
",",
"'xstrat'",
":",
"(",
"'quick'",
",",
"'split'",
")",
",",
"'rw'",
":",
"(",
"'write_calls'",
",",
"'read_calls'",
")",
",",
"'attr'",
":",
"(",
"'get'",
",",
"'set'",
",",
"'remove'",
",",
"'list'",
")",
",",
"'icluster'",
":",
"(",
"'iflush_count'",
",",
"'icluster_flushcnt'",
",",
"'icluster_flushinode'",
")",
",",
"'vnodes'",
":",
"(",
"'vn_active'",
",",
"'vn_alloc'",
",",
"'vn_get'",
",",
"'vn_hold'",
",",
"'vn_rele'",
",",
"'vn_reclaim'",
",",
"'vn_remove'",
",",
"'vn_free'",
")",
",",
"'buf'",
":",
"(",
"'xb_get'",
",",
"'xb_create'",
",",
"'xb_get_locked'",
",",
"'xb_get_locked_waited'",
",",
"'xb_busy_locked'",
",",
"'xb_miss_locked'",
",",
"'xb_page_retries'",
",",
"'xb_page_found'",
",",
"'xb_get_read'",
")",
",",
"'abtb2'",
":",
"(",
"'xs_abtb_2_lookup'",
",",
"'xs_abtb_2_compare'",
",",
"'xs_abtb_2_insrec'",
",",
"'xs_abtb_2_delrec'",
",",
"'xs_abtb_2_newroot'",
",",
"'xs_abtb_2_killroot'",
",",
"'xs_abtb_2_increment'",
",",
"'xs_abtb_2_decrement'",
",",
"'xs_abtb_2_lshift'",
",",
"'xs_abtb_2_rshift'",
",",
"'xs_abtb_2_split'",
",",
"'xs_abtb_2_join'",
",",
"'xs_abtb_2_alloc'",
",",
"'xs_abtb_2_free'",
",",
"'xs_abtb_2_moves'",
")",
",",
"'abtc2'",
":",
"(",
"'xs_abtc_2_lookup'",
",",
"'xs_abtc_2_compare'",
",",
"'xs_abtc_2_insrec'",
",",
"'xs_abtc_2_delrec'",
",",
"'xs_abtc_2_newroot'",
",",
"'xs_abtc_2_killroot'",
",",
"'xs_abtc_2_increment'",
",",
"'xs_abtc_2_decrement'",
",",
"'xs_abtc_2_lshift'",
",",
"'xs_abtc_2_rshift'",
",",
"'xs_abtc_2_split'",
",",
"'xs_abtc_2_join'",
",",
"'xs_abtc_2_alloc'",
",",
"'xs_abtc_2_free'",
",",
"'xs_abtc_2_moves'",
")",
",",
"'bmbt2'",
":",
"(",
"'xs_bmbt_2_lookup'",
",",
"'xs_bmbt_2_compare'",
",",
"'xs_bmbt_2_insrec'",
",",
"'xs_bmbt_2_delrec'",
",",
"'xs_bmbt_2_newroot'",
",",
"'xs_bmbt_2_killroot'",
",",
"'xs_bmbt_2_increment'",
",",
"'xs_bmbt_2_decrement'",
",",
"'xs_bmbt_2_lshift'",
",",
"'xs_bmbt_2_rshift'",
",",
"'xs_bmbt_2_split'",
",",
"'xs_bmbt_2_join'",
",",
"'xs_bmbt_2_alloc'",
",",
"'xs_bmbt_2_free'",
",",
"'xs_bmbt_2_moves'",
")",
",",
"'ibt2'",
":",
"(",
"'lookup'",
",",
"'compare'",
",",
"'insrec'",
",",
"'delrec'",
",",
"'newroot'",
",",
"'killroot'",
",",
"'increment'",
",",
"'decrement'",
",",
"'lshift'",
",",
"'rshift'",
",",
"'split'",
",",
"'join'",
",",
"'alloc'",
",",
"'free'",
",",
"'moves'",
")",
",",
"'fibt2'",
":",
"(",
"'lookup'",
",",
"'compare'",
",",
"'insrec'",
",",
"'delrec'",
",",
"'newroot'",
",",
"'killroot'",
",",
"'increment'",
",",
"'decrement'",
",",
"'lshift'",
",",
"'rshift'",
",",
"'split'",
",",
"'join'",
",",
"'alloc'",
",",
"'free'",
",",
"'moves'",
")",
",",
"'qm'",
":",
"(",
"'xs_qm_dquot'",
",",
"'xs_qm_dquot_unused'",
")",
",",
"'xpc'",
":",
"(",
"'xs_xstrat_bytes'",
",",
"'xs_write_bytes'",
",",
"'xs_read_bytes'",
")",
",",
"'debug'",
":",
"(",
"'debug'",
",",
")",
"}",
"f",
"=",
"open",
"(",
"self",
".",
"PROC",
")",
"new_stats",
"=",
"f",
".",
"readlines",
"(",
")",
"f",
".",
"close",
"(",
")",
"stats",
"=",
"{",
"}",
"for",
"line",
"in",
"new_stats",
":",
"items",
"=",
"line",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
")",
"stats",
"[",
"items",
"[",
"0",
"]",
"]",
"=",
"[",
"int",
"(",
"a",
")",
"for",
"a",
"in",
"items",
"[",
"1",
":",
"]",
"]",
"for",
"key",
"in",
"stats",
".",
"keys",
"(",
")",
":",
"for",
"item",
"in",
"enumerate",
"(",
"data_structure",
"[",
"key",
"]",
")",
":",
"metric_name",
"=",
"'.'",
".",
"join",
"(",
"[",
"key",
",",
"item",
"[",
"1",
"]",
"]",
")",
"value",
"=",
"stats",
"[",
"key",
"]",
"[",
"item",
"[",
"0",
"]",
"]",
"self",
".",
"publish_counter",
"(",
"metric_name",
",",
"value",
")"
]
| Collect xfs stats.
For an explanation of the following metrics visit
http://xfs.org/index.php/Runtime_Stats
https://github.com/torvalds/linux/blob/master/fs/xfs/xfs_stats.h | [
"Collect",
"xfs",
"stats",
"."
]
| python | train | 26.917031 |
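The long `collect` row above boils down to zipping each stat line's values against a tuple of field names and publishing a `key.field` counter for each pair. The core of that loop on a one-line toy input (the numbers are made up):

```python
data_structure = {"rw": ("write_calls", "read_calls")}

# Pretend the proc file contained the single line: "rw 12 34"
stats = {}
for line in ["rw 12 34"]:
    items = line.rstrip().split()
    stats[items[0]] = [int(a) for a in items[1:]]

for key in stats:
    for idx, field in enumerate(data_structure[key]):
        print(".".join([key, field]), stats[key][idx])
# -> rw.write_calls 12
# -> rw.read_calls 34
```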
expfactory/expfactory | expfactory/database/relational.py | https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/database/relational.py#L81-L95 | def print_user(self, user):
'''print a relational database user
'''
status = "active"
token = user.token
if token in ['finished', 'revoked']:
status = token
if token is None:
token = ''
subid = "%s\t%s[%s]" %(user.id, token, status)
print(subid)
return subid | [
"def",
"print_user",
"(",
"self",
",",
"user",
")",
":",
"status",
"=",
"\"active\"",
"token",
"=",
"user",
".",
"token",
"if",
"token",
"in",
"[",
"'finished'",
",",
"'revoked'",
"]",
":",
"status",
"=",
"token",
"if",
"token",
"is",
"None",
":",
"token",
"=",
"''",
"subid",
"=",
"\"%s\\t%s[%s]\"",
"%",
"(",
"user",
".",
"id",
",",
"token",
",",
"status",
")",
"print",
"(",
"subid",
")",
"return",
"subid"
]
| print a relational database user | [
"print",
"a",
"relational",
"database",
"user"
]
| python | train | 20.133333 |
saltstack/salt | salt/cloud/clouds/joyent.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/joyent.py#L717-L755 | def list_nodes(full=False, call=None):
'''
list of nodes, keeping only a brief listing
CLI Example:
.. code-block:: bash
salt-cloud -Q
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
ret = {}
if POLL_ALL_LOCATIONS:
for location in JOYENT_LOCATIONS:
result = query(command='my/machines', location=location,
method='GET')
if result[0] in VALID_RESPONSE_CODES:
nodes = result[1]
for node in nodes:
if 'name' in node:
node['location'] = location
ret[node['name']] = reformat_node(item=node, full=full)
else:
log.error('Invalid response when listing Joyent nodes: %s', result[1])
else:
location = get_location()
result = query(command='my/machines', location=location,
method='GET')
nodes = result[1]
for node in nodes:
if 'name' in node:
node['location'] = location
ret[node['name']] = reformat_node(item=node, full=full)
return ret | [
"def",
"list_nodes",
"(",
"full",
"=",
"False",
",",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The list_nodes function must be called with -f or --function.'",
")",
"ret",
"=",
"{",
"}",
"if",
"POLL_ALL_LOCATIONS",
":",
"for",
"location",
"in",
"JOYENT_LOCATIONS",
":",
"result",
"=",
"query",
"(",
"command",
"=",
"'my/machines'",
",",
"location",
"=",
"location",
",",
"method",
"=",
"'GET'",
")",
"if",
"result",
"[",
"0",
"]",
"in",
"VALID_RESPONSE_CODES",
":",
"nodes",
"=",
"result",
"[",
"1",
"]",
"for",
"node",
"in",
"nodes",
":",
"if",
"'name'",
"in",
"node",
":",
"node",
"[",
"'location'",
"]",
"=",
"location",
"ret",
"[",
"node",
"[",
"'name'",
"]",
"]",
"=",
"reformat_node",
"(",
"item",
"=",
"node",
",",
"full",
"=",
"full",
")",
"else",
":",
"log",
".",
"error",
"(",
"'Invalid response when listing Joyent nodes: %s'",
",",
"result",
"[",
"1",
"]",
")",
"else",
":",
"location",
"=",
"get_location",
"(",
")",
"result",
"=",
"query",
"(",
"command",
"=",
"'my/machines'",
",",
"location",
"=",
"location",
",",
"method",
"=",
"'GET'",
")",
"nodes",
"=",
"result",
"[",
"1",
"]",
"for",
"node",
"in",
"nodes",
":",
"if",
"'name'",
"in",
"node",
":",
"node",
"[",
"'location'",
"]",
"=",
"location",
"ret",
"[",
"node",
"[",
"'name'",
"]",
"]",
"=",
"reformat_node",
"(",
"item",
"=",
"node",
",",
"full",
"=",
"full",
")",
"return",
"ret"
]
| list of nodes, keeping only a brief listing
CLI Example:
.. code-block:: bash
salt-cloud -Q | [
"list",
"of",
"nodes",
"keeping",
"only",
"a",
"brief",
"listing"
]
| python | train | 31.461538 |
datamachine/twx.botapi | twx/botapi/botapi.py | https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L2599-L2668 | def send_audio(chat_id, audio,
caption=None, duration=None, performer=None, title=None, reply_to_message_id=None, reply_markup=None,
disable_notification=False, parse_mode=None, **kwargs):
"""
Use this method to send audio files, if you want Telegram clients to display them in the music player.
Your audio must be in the .mp3 format. On success, the sent Message is returned. Bots can currently send audio
files of up to 50 MB in size, this limit may be changed in the future.
For backward compatibility, when the fields title and performer are both empty and the mime-type of the file to
be sent is not audio/mpeg, the file will be sent as a playable voice message. For this to work, the audio must
be in an .ogg file encoded with OPUS. This behavior will be phased out in the future. For sending voice
messages, use the sendVoice method instead.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param audio: Audio file to send. Pass a file_id as String to send an audio file that exists on the Telegram servers (recommended),
pass an HTTP URL as a String for Telegram to get an audio file from the Internet, or upload a new one using multipart/form-data.
:param caption: Audio caption, 0-200 characters
:param duration: Duration of the audio in seconds
:param performer: Performer
:param title: Track name
:param reply_to_message_id: If the message is a reply, ID of the original message
:param reply_markup: Additional interface options. A JSON-serialized object for a custom reply keyboard,
:param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users
will receive a notification with no sound. Other apps coming soon.
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline
URLs in your bot's message.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type audio: InputFile or str
:type caption: str
:type duration: int
:type performer: str
:type title: str
:type reply_to_message_id: int
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:type parse_mode: str
:returns: On success, the sent Message is returned.
:rtype: TelegramBotRPCRequest
"""
files = None
if isinstance(audio, InputFile):
files = [audio]
audio = None
elif not isinstance(audio, str):
raise Exception('audio must be instance of InputFile or str')
# required args
params = dict(
chat_id=chat_id,
audio=audio
)
# optional args
params.update(
_clean_params(
caption=caption,
duration=duration,
performer=performer,
title=title,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
disable_notification=disable_notification,
parse_mode=parse_mode,
)
)
return TelegramBotRPCRequest('sendAudio', params=params, files=files, on_result=Message.from_result, **kwargs) | [
"def",
"send_audio",
"(",
"chat_id",
",",
"audio",
",",
"caption",
"=",
"None",
",",
"duration",
"=",
"None",
",",
"performer",
"=",
"None",
",",
"title",
"=",
"None",
",",
"reply_to_message_id",
"=",
"None",
",",
"reply_markup",
"=",
"None",
",",
"disable_notification",
"=",
"False",
",",
"parse_mode",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"files",
"=",
"None",
"if",
"isinstance",
"(",
"audio",
",",
"InputFile",
")",
":",
"files",
"=",
"[",
"audio",
"]",
"audio",
"=",
"None",
"elif",
"not",
"isinstance",
"(",
"audio",
",",
"str",
")",
":",
"raise",
"Exception",
"(",
"'audio must be instance of InputFile or str'",
")",
"# required args",
"params",
"=",
"dict",
"(",
"chat_id",
"=",
"chat_id",
",",
"audio",
"=",
"audio",
")",
"# optional args",
"params",
".",
"update",
"(",
"_clean_params",
"(",
"caption",
"=",
"caption",
",",
"duration",
"=",
"duration",
",",
"performer",
"=",
"performer",
",",
"title",
"=",
"title",
",",
"reply_to_message_id",
"=",
"reply_to_message_id",
",",
"reply_markup",
"=",
"reply_markup",
",",
"disable_notification",
"=",
"disable_notification",
",",
"parse_mode",
"=",
"parse_mode",
",",
")",
")",
"return",
"TelegramBotRPCRequest",
"(",
"'sendAudio'",
",",
"params",
"=",
"params",
",",
"files",
"=",
"files",
",",
"on_result",
"=",
"Message",
".",
"from_result",
",",
"*",
"*",
"kwargs",
")"
]
| Use this method to send audio files, if you want Telegram clients to display them in the music player.
Your audio must be in the .mp3 format. On success, the sent Message is returned. Bots can currently send audio
files of up to 50 MB in size, this limit may be changed in the future.
For backward compatibility, when the fields title and performer are both empty and the mime-type of the file to
be sent is not audio/mpeg, the file will be sent as a playable voice message. For this to work, the audio must
be in an .ogg file encoded with OPUS. This behavior will be phased out in the future. For sending voice
messages, use the sendVoice method instead.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param audio: Audio file to send. Pass a file_id as String to send an audio file that exists on the Telegram servers (recommended),
pass an HTTP URL as a String for Telegram to get an audio file from the Internet, or upload a new one using multipart/form-data.
:param caption: Audio caption, 0-200 characters
:param duration: Duration of the audio in seconds
:param performer: Performer
:param title: Track name
:param reply_to_message_id: If the message is a reply, ID of the original message
:param reply_markup: Additional interface options. A JSON-serialized object for a custom reply keyboard,
:param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users
will receive a notification with no sound. Other apps coming soon.
:param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline
URLs in your bot's message.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type audio: InputFile or str
:type caption: str
:type duration: int
:type performer: str
:type title: str
:type reply_to_message_id: int
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:type parse_mode: str
:returns: On success, the sent Message is returned.
:rtype: TelegramBotRPCRequest | [
"Use",
"this",
"method",
"to",
"send",
"audio",
"files",
"if",
"you",
"want",
"Telegram",
"clients",
"to",
"display",
"them",
"in",
"the",
"music",
"player",
"."
]
| python | train | 46.728571 |
wandb/client | wandb/vendor/prompt_toolkit/eventloop/posix.py | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/eventloop/posix.py#L271-L275 | def add_reader(self, fd, callback):
" Add read file descriptor to the event loop. "
fd = fd_to_int(fd)
self._read_fds[fd] = callback
self.selector.register(fd) | [
"def",
"add_reader",
"(",
"self",
",",
"fd",
",",
"callback",
")",
":",
"fd",
"=",
"fd_to_int",
"(",
"fd",
")",
"self",
".",
"_read_fds",
"[",
"fd",
"]",
"=",
"callback",
"self",
".",
"selector",
".",
"register",
"(",
"fd",
")"
]
| Add read file descriptor to the event loop. | [
"Add",
"read",
"file",
"descriptor",
"to",
"the",
"event",
"loop",
"."
]
| python | train | 37.4 |
cebel/pyctd | src/pyctd/manager/query.py | https://github.com/cebel/pyctd/blob/38ba02adaddb60cef031d3b75516773fe8a046b5/src/pyctd/manager/query.py#L84-L105 | def _join_disease(query, disease_definition, disease_id, disease_name):
"""helper function to add a query join to Disease model
:param sqlalchemy.orm.query.Query query: SQL Alchemy query
:param disease_definition:
:param str disease_id: see :attr:`models.Disease.disease_id`
:param disease_name:
:rtype: sqlalchemy.orm.query.Query
"""
if disease_definition or disease_id or disease_name:
query = query.join(models.Disease)
if disease_definition:
query = query.filter(models.Disease.definition.like(disease_definition))
if disease_id:
query = query.filter(models.Disease.disease_id == disease_id)
if disease_name:
query = query.filter(models.Disease.disease_name.like(disease_name))
return query | [
"def",
"_join_disease",
"(",
"query",
",",
"disease_definition",
",",
"disease_id",
",",
"disease_name",
")",
":",
"if",
"disease_definition",
"or",
"disease_id",
"or",
"disease_name",
":",
"query",
"=",
"query",
".",
"join",
"(",
"models",
".",
"Disease",
")",
"if",
"disease_definition",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"models",
".",
"Disease",
".",
"definition",
".",
"like",
"(",
"disease_definition",
")",
")",
"if",
"disease_id",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"models",
".",
"Disease",
".",
"disease_id",
"==",
"disease_id",
")",
"if",
"disease_name",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"models",
".",
"Disease",
".",
"disease_name",
".",
"like",
"(",
"disease_name",
")",
")",
"return",
"query"
]
| helper function to add a query join to Disease model
:param sqlalchemy.orm.query.Query query: SQL Alchemy query
:param disease_definition:
:param str disease_id: see :attr:`models.Disease.disease_id`
:param disease_name:
:rtype: sqlalchemy.orm.query.Query | [
"helper",
"function",
"to",
"add",
"a",
"query",
"join",
"to",
"Disease",
"model",
":",
"param",
"sqlalchemy",
".",
"orm",
".",
"query",
".",
"Query",
"query",
":",
"SQL",
"Alchemy",
"query",
":",
"param",
"disease_definition",
":",
":",
"param",
"str",
"disease_id",
":",
"see",
":",
"attr",
":",
"models",
".",
"Disease",
".",
"disease_id",
":",
"param",
"disease_name",
":",
":",
"rtype",
":",
"sqlalchemy",
".",
"orm",
".",
"query",
".",
"Query"
]
| python | train | 38.909091 |
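The `_join_disease` row above shows the conditional-join pattern: join the Disease model once if any disease argument was given, then stack `filter` calls only for the arguments actually present. A minimal SQLAlchemy sketch of the same pattern (the `Gene`/`Disease` models and in-memory engine are assumptions defined here, not pyctd's real schema):

```python
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Gene(Base):
    __tablename__ = "gene"
    id = Column(Integer, primary_key=True)
    symbol = Column(String)

class Disease(Base):
    __tablename__ = "disease"
    id = Column(Integer, primary_key=True)
    disease_name = Column(String)
    gene_id = Column(Integer, ForeignKey("gene.id"))

def join_disease(query, disease_name=None):
    # Join and filter only when a disease constraint was actually given,
    # mirroring the conditional-join helper above.
    if disease_name:
        query = query.join(Disease)
        query = query.filter(Disease.disease_name.like(disease_name))
    return query

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
print(join_disease(session.query(Gene), disease_name="%carcinoma%"))
```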
Nukesor/pueue | pueue/client/socket.py | https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/client/socket.py#L7-L23 | def receive_data(socket):
"""Receive an answer from the daemon and return the response.
Args:
socket (socket.socket): A socket that is connected to the daemon.
Returns:
dir or string: The unpickled answer.
"""
answer = b""
while True:
packet = socket.recv(4096)
if not packet: break
answer += packet
response = pickle.loads(answer)
socket.close()
return response | [
"def",
"receive_data",
"(",
"socket",
")",
":",
"answer",
"=",
"b\"\"",
"while",
"True",
":",
"packet",
"=",
"socket",
".",
"recv",
"(",
"4096",
")",
"if",
"not",
"packet",
":",
"break",
"answer",
"+=",
"packet",
"response",
"=",
"pickle",
".",
"loads",
"(",
"answer",
")",
"socket",
".",
"close",
"(",
")",
"return",
"response"
]
| Receive an answer from the daemon and return the response.
Args:
socket (socket.socket): A socket that is connected to the daemon.
Returns:
dir or string: The unpickled answer. | [
"Receive",
"an",
"answer",
"from",
"the",
"daemon",
"and",
"return",
"the",
"response",
"."
]
| python | train | 24.705882 |
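The `receive_data` row above loops on `recv` until EOF and unpickles the accumulated bytes. A matched sender/receiver sketch over a local socket pair (the payload is an assumption; the parameter is renamed `sock` to avoid shadowing the `socket` module, and unpickling is only safe for trusted peers such as pueue's local daemon):

```python
import pickle
import socket

def receive_data(sock):
    """Read until the peer closes, then unpickle the whole buffer."""
    answer = b""
    while True:
        packet = sock.recv(4096)
        if not packet:
            break
        answer += packet
    sock.close()
    return pickle.loads(answer)

# Simulate daemon -> client over a connected local socket pair.
client, daemon = socket.socketpair()
daemon.sendall(pickle.dumps({"status": "running", "queue": [1, 2, 3]}))
daemon.close()  # closing signals EOF, telling the receiver the message ended

print(receive_data(client))  # -> {'status': 'running', 'queue': [1, 2, 3]}
```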
gem/oq-engine | openquake/hmtk/faults/mfd/anderson_luco_area_mmax.py | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/faults/mfd/anderson_luco_area_mmax.py#L83-L102 | def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
'''
Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983)
'''
delta_m = (mmax - mag_value)
a_1 = self._get_a1_value(bbar, dbar, slip / 10., beta, mmax)
return a_1 * np.exp(bbar * delta_m) * (delta_m > 0.0) | [
"def",
"cumulative_value",
"(",
"self",
",",
"slip",
",",
"mmax",
",",
"mag_value",
",",
"bbar",
",",
"dbar",
",",
"beta",
")",
":",
"delta_m",
"=",
"(",
"mmax",
"-",
"mag_value",
")",
"a_1",
"=",
"self",
".",
"_get_a1_value",
"(",
"bbar",
",",
"dbar",
",",
"slip",
"/",
"10.",
",",
"beta",
",",
"mmax",
")",
"return",
"a_1",
"*",
"np",
".",
"exp",
"(",
"bbar",
"*",
"delta_m",
")",
"*",
"(",
"delta_m",
">",
"0.0",
")"
]
| Returns the rate of events with M > mag_value
:param float slip:
Slip rate in mm/yr
:param float mmax:
Maximum magnitude
:param float mag_value:
Magnitude value
:param float bbar:
\bar{b} parameter (effectively = b * log(10.))
:param float dbar:
\bar{d} parameter
:param float beta:
Beta value of formula defined in Eq. 20 of Anderson & Luco (1983) | [
"Returns",
"the",
"rate",
"of",
"events",
"with",
"M",
">",
"mag_value"
]
| python | train | 35.95 |
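In the `cumulative_value` row above, the factor `(delta_m > 0.0)` acts as a Heaviside step that zeroes the rate for magnitudes at or above `mmax`. A numeric illustration with an assumed `a_1` (the real `_get_a1_value` depends on slip rate and beta and is not reproduced here):

```python
import numpy as np

mmax, bbar = 8.0, np.log(10)   # bbar = b * ln(10) with b = 1, an assumption
a_1 = 0.01                     # placeholder for _get_a1_value(...)

mags = np.array([5.0, 6.5, 8.1])
delta_m = mmax - mags
rates = a_1 * np.exp(bbar * delta_m) * (delta_m > 0.0)
print(rates)  # -> approx [10.0, 0.316, 0.0]; zero at or beyond mmax
```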
koalalorenzo/python-digitalocean | digitalocean/SSHKey.py | https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/SSHKey.py#L73-L89 | def edit(self):
"""
Edit the SSH Key
"""
input_params = {
"name": self.name,
"public_key": self.public_key,
}
data = self.get_data(
"account/keys/%s" % self.id,
type=PUT,
params=input_params
)
if data:
self.id = data['ssh_key']['id'] | [
"def",
"edit",
"(",
"self",
")",
":",
"input_params",
"=",
"{",
"\"name\"",
":",
"self",
".",
"name",
",",
"\"public_key\"",
":",
"self",
".",
"public_key",
",",
"}",
"data",
"=",
"self",
".",
"get_data",
"(",
"\"account/keys/%s\"",
"%",
"self",
".",
"id",
",",
"type",
"=",
"PUT",
",",
"params",
"=",
"input_params",
")",
"if",
"data",
":",
"self",
".",
"id",
"=",
"data",
"[",
"'ssh_key'",
"]",
"[",
"'id'",
"]"
]
| Edit the SSH Key | [
"Edit",
"the",
"SSH",
"Key"
]
| python | valid | 21.117647 |
revarbat/ssltelnet | ssltelnet/__init__.py | https://github.com/revarbat/ssltelnet/blob/f2d6171c168f3f8b51a0793323cfaa27bc01a3e0/ssltelnet/__init__.py#L61-L68 | def open(self, *args, **kwargs):
"""
Works exactly like the Telnet.open() call from the telnetlib
module, except SSL/TLS may be transparently negotiated.
"""
Telnet.open(self, *args, **kwargs)
if self.force_ssl:
self._start_tls() | [
"def",
"open",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"Telnet",
".",
"open",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"force_ssl",
":",
"self",
".",
"_start_tls",
"(",
")"
]
| Works exactly like the Telnet.open() call from the telnetlib
module, except SSL/TLS may be transparently negotiated. | [
"Works",
"exactly",
"like",
"the",
"Telnet",
".",
"open",
"()",
"call",
"from",
"the",
"telnetlib",
"module",
"except",
"SSL",
"/",
"TLS",
"may",
"be",
"transparently",
"negotiated",
"."
]
| python | train | 35.25 |
tensorflow/cleverhans | cleverhans/attacks/bapp.py | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/bapp.py#L504-L519 | def geometric_progression_for_stepsize(x, update, dist, decision_function,
current_iteration):
""" Geometric progression to search for stepsize.
Keep decreasing stepsize by half until reaching
the desired side of the boundary.
"""
epsilon = dist / np.sqrt(current_iteration)
while True:
updated = x + epsilon * update
success = decision_function(updated[None])[0]
if success:
break
else:
epsilon = epsilon / 2.0
return epsilon | [
"def",
"geometric_progression_for_stepsize",
"(",
"x",
",",
"update",
",",
"dist",
",",
"decision_function",
",",
"current_iteration",
")",
":",
"epsilon",
"=",
"dist",
"/",
"np",
".",
"sqrt",
"(",
"current_iteration",
")",
"while",
"True",
":",
"updated",
"=",
"x",
"+",
"epsilon",
"*",
"update",
"success",
"=",
"decision_function",
"(",
"updated",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"if",
"success",
":",
"break",
"else",
":",
"epsilon",
"=",
"epsilon",
"/",
"2.0",
"return",
"epsilon"
]
| Geometric progression to search for stepsize.
Keep decreasing stepsize by half until reaching
the desired side of the boundary. | [
"Geometric",
"progression",
"to",
"search",
"for",
"stepsize",
".",
"Keep",
"decreasing",
"stepsize",
"by",
"half",
"until",
"reaching",
"the",
"desired",
"side",
"of",
"the",
"boundary",
"."
]
| python | train | 31.3125 |
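A toy call showing the halving behavior (the decision function here is a stand-in oracle, not a real model):

import numpy as np

def decision_function(batch):
    # stand-in oracle: the step keeps us on the desired side only while x[0] < 1
    return batch[:, 0] < 1.0

x = np.zeros(2)
update = np.array([1.0, 0.0])
eps = geometric_progression_for_stepsize(x, update, dist=8.0,
                                         decision_function=decision_function,
                                         current_iteration=16)
print(eps)  # starts at 8/sqrt(16)=2.0, halves twice, returns 0.5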
hannes-brt/hebel | hebel/pycuda_ops/cublas.py | https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L2534-L2546 | def cublasDsbmv(handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy):
"""
Matrix-vector product for real symmetric-banded matrix.
"""
status = _libcublas.cublasDsbmv_v2(handle,
_CUBLAS_FILL_MODE[uplo], n, k,
ctypes.byref(ctypes.c_double(alpha)),
int(A), lda, int(x), incx,
ctypes.byref(ctypes.c_double(beta)),
int(y), incy)
cublasCheckStatus(status) | [
"def",
"cublasDsbmv",
"(",
"handle",
",",
"uplo",
",",
"n",
",",
"k",
",",
"alpha",
",",
"A",
",",
"lda",
",",
"x",
",",
"incx",
",",
"beta",
",",
"y",
",",
"incy",
")",
":",
"status",
"=",
"_libcublas",
".",
"cublasDsbmv_v2",
"(",
"handle",
",",
"_CUBLAS_FILL_MODE",
"[",
"uplo",
"]",
",",
"n",
",",
"k",
",",
"ctypes",
".",
"byref",
"(",
"ctypes",
".",
"c_double",
"(",
"alpha",
")",
")",
",",
"int",
"(",
"A",
")",
",",
"lda",
",",
"int",
"(",
"x",
")",
",",
"incx",
",",
"ctypes",
".",
"byref",
"(",
"ctypes",
".",
"c_double",
"(",
"beta",
")",
")",
",",
"int",
"(",
"y",
")",
",",
"incy",
")",
"cublasCheckStatus",
"(",
"status",
")"
]
| Matrix-vector product for real symmetric-banded matrix. | [
"Matrix",
"-",
"vector",
"product",
"for",
"real",
"symmetric",
"-",
"banded",
"matrix",
"."
]
| python | train | 43.076923 |
hph/mov | mov.py | https://github.com/hph/mov/blob/36a18d92836e1aff74ca02e16ce09d1c46e111b9/mov.py#L88-L101 | def create():
"""Create a new database with information about the films in the specified
directory or directories."""
if not all(map(os.path.isdir, ARGS.directory)):
exit('Error: One or more of the specified directories does not exist.')
with sqlite3.connect(ARGS.database) as connection:
connection.text_factory = str
cursor = connection.cursor()
cursor.execute('DROP TABLE IF EXISTS Movies')
cursor.execute('''CREATE TABLE Movies(name TEXT, path TEXT, size TEXT,
files BLOB)''')
for dir in ARGS.directory:
cursor.executemany('INSERT INTO Movies VALUES(?, ?, ?, ?)',
local_data(dir)) | [
"def",
"create",
"(",
")",
":",
"if",
"not",
"all",
"(",
"map",
"(",
"os",
".",
"path",
".",
"isdir",
",",
"ARGS",
".",
"directory",
")",
")",
":",
"exit",
"(",
"'Error: One or more of the specified directories does not exist.'",
")",
"with",
"sqlite3",
".",
"connect",
"(",
"ARGS",
".",
"database",
")",
"as",
"connection",
":",
"connection",
".",
"text_factory",
"=",
"str",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"'DROP TABLE IF EXISTS Movies'",
")",
"cursor",
".",
"execute",
"(",
"'''CREATE TABLE Movies(name TEXT, path TEXT, size TEXT,\n files BLOB)'''",
")",
"for",
"dir",
"in",
"ARGS",
".",
"directory",
":",
"cursor",
".",
"executemany",
"(",
"'INSERT INTO Movies VALUES(?, ?, ?, ?)'",
",",
"local_data",
"(",
"dir",
")",
")"
]
| Create a new database with information about the films in the specified
directory or directories. | [
"Create",
"a",
"new",
"database",
"with",
"information",
"about",
"the",
"films",
"in",
"the",
"specified",
"directory",
"or",
"directories",
"."
]
| python | train | 50.5 |
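create() leans on a module-level ARGS namespace and a local_data() helper that sit outside this record; the same table setup, standalone with stand-in rows:

import sqlite3

rows = [("Alien", "/films/alien.mkv", "1.2 GB", "alien.mkv")]  # stand-in for local_data()
with sqlite3.connect("movies.db") as connection:
    cursor = connection.cursor()
    cursor.execute("DROP TABLE IF EXISTS Movies")
    cursor.execute("""CREATE TABLE Movies(name TEXT, path TEXT, size TEXT,
                      files BLOB)""")
    cursor.executemany("INSERT INTO Movies VALUES(?, ?, ?, ?)", rows)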
neovim/pynvim | pynvim/msgpack_rpc/msgpack_stream.py | https://github.com/neovim/pynvim/blob/5e577188e6d7133f597ad0ce60dc6a4b1314064a/pynvim/msgpack_rpc/msgpack_stream.py#L31-L34 | def send(self, msg):
"""Queue `msg` for sending to Nvim."""
debug('sent %s', msg)
self.loop.send(self._packer.pack(msg)) | [
"def",
"send",
"(",
"self",
",",
"msg",
")",
":",
"debug",
"(",
"'sent %s'",
",",
"msg",
")",
"self",
".",
"loop",
".",
"send",
"(",
"self",
".",
"_packer",
".",
"pack",
"(",
"msg",
")",
")"
]
| Queue `msg` for sending to Nvim. | [
"Queue",
"msg",
"for",
"sending",
"to",
"Nvim",
"."
]
| python | train | 35.25 |
nfcpy/nfcpy | src/nfc/tag/tt1.py | https://github.com/nfcpy/nfcpy/blob/6649146d1afdd5e82b2b6b1ea00aa58d50785117/src/nfc/tag/tt1.py#L354-L361 | def protect(self, password=None, read_protect=False, protect_from=0):
"""The implementation of :meth:`nfc.tag.Tag.protect` for a generic
type 1 tag is limited to setting the NDEF data read-only for
tags that are already NDEF formatted.
"""
return super(Type1Tag, self).protect(
password, read_protect, protect_from) | [
"def",
"protect",
"(",
"self",
",",
"password",
"=",
"None",
",",
"read_protect",
"=",
"False",
",",
"protect_from",
"=",
"0",
")",
":",
"return",
"super",
"(",
"Type1Tag",
",",
"self",
")",
".",
"protect",
"(",
"password",
",",
"read_protect",
",",
"protect_from",
")"
]
| The implementation of :meth:`nfc.tag.Tag.protect` for a generic
type 1 tag is limited to setting the NDEF data read-only for
tags that are already NDEF formatted. | [
"The",
"implementation",
"of",
":",
"meth",
":",
"nfc",
".",
"tag",
".",
"Tag",
".",
"protect",
"for",
"a",
"generic",
"type",
"1",
"tag",
"is",
"limited",
"to",
"setting",
"the",
"NDEF",
"data",
"read",
"-",
"only",
"for",
"tags",
"that",
"are",
"already",
"NDEF",
"formatted",
"."
]
| python | train | 45.125 |
archman/beamline | beamline/models.py | https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/models.py#L216-L226 | def getElementsByName(self, name):
""" get element with given name,
        return list of element objects corresponding to 'name'
:param name: element name, case sensitive, if elements are
auto-generated from LteParser, the name should be lower cased.
"""
try:
return filter(lambda x: x.name == name, self._lattice_eleobjlist)
except:
return [] | [
"def",
"getElementsByName",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"return",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"name",
"==",
"name",
",",
"self",
".",
"_lattice_eleobjlist",
")",
"except",
":",
"return",
"[",
"]"
]
| get element with given name,
        return list of element objects corresponding to 'name'
:param name: element name, case sensitive, if elements are
auto-generated from LteParser, the name should be lower cased. | [
"get",
"element",
"with",
"given",
"name",
"return",
"list",
"of",
"element",
"objects",
"regarding",
"to",
"name"
]
| python | train | 38.181818 |
crackinglandia/pype32 | pype32/pype32.py | https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/pype32/pype32.py#L849-L858 | def isPe64(self):
"""
Determines if the current L{PE} instance is a PE64 file.
@rtype: bool
@return: C{True} if the current L{PE} instance is a PE64 file. Otherwise, returns C{False}.
"""
if self.ntHeaders.optionalHeader.magic.value == consts.PE64:
return True
return False | [
"def",
"isPe64",
"(",
"self",
")",
":",
"if",
"self",
".",
"ntHeaders",
".",
"optionalHeader",
".",
"magic",
".",
"value",
"==",
"consts",
".",
"PE64",
":",
"return",
"True",
"return",
"False"
]
| Determines if the current L{PE} instance is a PE64 file.
@rtype: bool
@return: C{True} if the current L{PE} instance is a PE64 file. Otherwise, returns C{False}. | [
"Determines",
"if",
"the",
"current",
"L",
"{",
"PE",
"}",
"instance",
"is",
"a",
"PE64",
"file",
"."
]
| python | train | 34.1 |
bitlabstudio/django-dashboard-app | dashboard_app/decorators.py | https://github.com/bitlabstudio/django-dashboard-app/blob/ed98f2bca91a4ced36d0dd1aa1baee78e989cf64/dashboard_app/decorators.py#L11-L31 | def permission_required(perm, login_url=None, raise_exception=False):
"""
Re-implementation of the permission_required decorator, honors settings.
If ``DASHBOARD_REQUIRE_LOGIN`` is False, this decorator will always return
``True``, otherwise it will check for the permission as usual.
"""
def check_perms(user):
if not getattr(settings, 'DASHBOARD_REQUIRE_LOGIN',
app_settings.REQUIRE_LOGIN):
return True
# First check if the user has the permission (even anon users)
if user.has_perm(perm):
return True
# In case the 403 handler should be called raise the exception
if raise_exception: # pragma: no cover
raise PermissionDenied
# As the last resort, show the login form
return False
return user_passes_test(check_perms, login_url=login_url) | [
"def",
"permission_required",
"(",
"perm",
",",
"login_url",
"=",
"None",
",",
"raise_exception",
"=",
"False",
")",
":",
"def",
"check_perms",
"(",
"user",
")",
":",
"if",
"not",
"getattr",
"(",
"settings",
",",
"'DASHBOARD_REQUIRE_LOGIN'",
",",
"app_settings",
".",
"REQUIRE_LOGIN",
")",
":",
"return",
"True",
"# First check if the user has the permission (even anon users)",
"if",
"user",
".",
"has_perm",
"(",
"perm",
")",
":",
"return",
"True",
"# In case the 403 handler should be called raise the exception",
"if",
"raise_exception",
":",
"# pragma: no cover",
"raise",
"PermissionDenied",
"# As the last resort, show the login form",
"return",
"False",
"return",
"user_passes_test",
"(",
"check_perms",
",",
"login_url",
"=",
"login_url",
")"
]
| Re-implementation of the permission_required decorator, honors settings.
If ``DASHBOARD_REQUIRE_LOGIN`` is False, this decorator will always return
``True``, otherwise it will check for the permission as usual. | [
"Re",
"-",
"implementation",
"of",
"the",
"permission_required",
"decorator",
"honors",
"settings",
"."
]
| python | test | 41.285714 |
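A usage sketch on a plain Django view (the permission codename is made up):

from django.http import HttpResponse

@permission_required("dashboard.can_view")  # hypothetical codename
def dashboard_view(request):
    return HttpResponse("ok")

# With DASHBOARD_REQUIRE_LOGIN = False in settings, check_perms() short-circuits
# to True for every user; otherwise the normal has_perm() check applies.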
ella/ella | ella/utils/installedapps.py | https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/utils/installedapps.py#L27-L45 | def call_modules(auto_discover=()):
"""
this is called in project urls.py
for registering desired modules (eg.: admin.py)
"""
for app in settings.INSTALLED_APPS:
modules = set(auto_discover)
if app in INSTALLED_APPS_REGISTER:
modules.update(INSTALLED_APPS_REGISTER[app])
for module in modules:
mod = import_module(app)
try:
import_module('%s.%s' % (app, module))
inst = getattr(mod, '__install__', lambda: None)
inst()
except:
if module_has_submodule(mod, module):
raise
app_modules_loaded.send(sender=None) | [
"def",
"call_modules",
"(",
"auto_discover",
"=",
"(",
")",
")",
":",
"for",
"app",
"in",
"settings",
".",
"INSTALLED_APPS",
":",
"modules",
"=",
"set",
"(",
"auto_discover",
")",
"if",
"app",
"in",
"INSTALLED_APPS_REGISTER",
":",
"modules",
".",
"update",
"(",
"INSTALLED_APPS_REGISTER",
"[",
"app",
"]",
")",
"for",
"module",
"in",
"modules",
":",
"mod",
"=",
"import_module",
"(",
"app",
")",
"try",
":",
"import_module",
"(",
"'%s.%s'",
"%",
"(",
"app",
",",
"module",
")",
")",
"inst",
"=",
"getattr",
"(",
"mod",
",",
"'__install__'",
",",
"lambda",
":",
"None",
")",
"inst",
"(",
")",
"except",
":",
"if",
"module_has_submodule",
"(",
"mod",
",",
"module",
")",
":",
"raise",
"app_modules_loaded",
".",
"send",
"(",
"sender",
"=",
"None",
")"
]
| this is called in project urls.py
for registering desired modules (eg.: admin.py) | [
"this",
"is",
"called",
"in",
"project",
"urls",
".",
"py",
"for",
"registering",
"desired",
"modules",
"(",
"eg",
".",
":",
"admin",
".",
"py",
")"
]
| python | train | 35.210526 |
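Called from a project's urls.py; 'register' here is a hypothetical submodule name alongside the usual admin:

from ella.utils.installedapps import call_modules

call_modules(auto_discover=("admin", "register"))
# imports <app>.admin and <app>.register for every app in INSTALLED_APPS,
# then fires the app_modules_loaded signal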
tanghaibao/jcvi | jcvi/assembly/hic.py | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L1099-L1127 | def prepare_synteny(tourfile, lastfile, odir, p, opts):
"""
Prepare synteny plots for movie().
"""
qbedfile, sbedfile = get_bed_filenames(lastfile, p, opts)
qbedfile = op.abspath(qbedfile)
sbedfile = op.abspath(sbedfile)
qbed = Bed(qbedfile, sorted=False)
contig_to_beds = dict(qbed.sub_beds())
# Create a separate directory for the subplots and movie
mkdir(odir, overwrite=True)
os.chdir(odir)
logging.debug("Change into subdir `{}`".format(odir))
# Make anchorsfile
anchorsfile = ".".join(op.basename(lastfile).split(".", 2)[:2]) \
+ ".anchors"
fw = open(anchorsfile, "w")
for b in Blast(lastfile):
print("\t".join((gene_name(b.query), gene_name(b.subject),
str(int(b.score)))), file=fw)
fw.close()
# Symlink sbed
symlink(sbedfile, op.basename(sbedfile))
return anchorsfile, qbedfile, contig_to_beds | [
"def",
"prepare_synteny",
"(",
"tourfile",
",",
"lastfile",
",",
"odir",
",",
"p",
",",
"opts",
")",
":",
"qbedfile",
",",
"sbedfile",
"=",
"get_bed_filenames",
"(",
"lastfile",
",",
"p",
",",
"opts",
")",
"qbedfile",
"=",
"op",
".",
"abspath",
"(",
"qbedfile",
")",
"sbedfile",
"=",
"op",
".",
"abspath",
"(",
"sbedfile",
")",
"qbed",
"=",
"Bed",
"(",
"qbedfile",
",",
"sorted",
"=",
"False",
")",
"contig_to_beds",
"=",
"dict",
"(",
"qbed",
".",
"sub_beds",
"(",
")",
")",
"# Create a separate directory for the subplots and movie",
"mkdir",
"(",
"odir",
",",
"overwrite",
"=",
"True",
")",
"os",
".",
"chdir",
"(",
"odir",
")",
"logging",
".",
"debug",
"(",
"\"Change into subdir `{}`\"",
".",
"format",
"(",
"odir",
")",
")",
"# Make anchorsfile",
"anchorsfile",
"=",
"\".\"",
".",
"join",
"(",
"op",
".",
"basename",
"(",
"lastfile",
")",
".",
"split",
"(",
"\".\"",
",",
"2",
")",
"[",
":",
"2",
"]",
")",
"+",
"\".anchors\"",
"fw",
"=",
"open",
"(",
"anchorsfile",
",",
"\"w\"",
")",
"for",
"b",
"in",
"Blast",
"(",
"lastfile",
")",
":",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"(",
"gene_name",
"(",
"b",
".",
"query",
")",
",",
"gene_name",
"(",
"b",
".",
"subject",
")",
",",
"str",
"(",
"int",
"(",
"b",
".",
"score",
")",
")",
")",
")",
",",
"file",
"=",
"fw",
")",
"fw",
".",
"close",
"(",
")",
"# Symlink sbed",
"symlink",
"(",
"sbedfile",
",",
"op",
".",
"basename",
"(",
"sbedfile",
")",
")",
"return",
"anchorsfile",
",",
"qbedfile",
",",
"contig_to_beds"
]
| Prepare synteny plots for movie(). | [
"Prepare",
"synteny",
"plots",
"for",
"movie",
"()",
"."
]
| python | train | 31.586207 |
fermiPy/fermipy | fermipy/hpx_utils.py | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/hpx_utils.py#L98-L110 | def hpx_to_coords(h, shape):
""" Generate an N x D list of pixel center coordinates where N is
the number of pixels and D is the dimensionality of the map."""
x, z = hpx_to_axes(h, shape)
x = np.sqrt(x[0:-1] * x[1:])
z = z[:-1] + 0.5
x = np.ravel(np.ones(shape) * x[:, np.newaxis])
z = np.ravel(np.ones(shape) * z[np.newaxis, :])
return np.vstack((x, z)) | [
"def",
"hpx_to_coords",
"(",
"h",
",",
"shape",
")",
":",
"x",
",",
"z",
"=",
"hpx_to_axes",
"(",
"h",
",",
"shape",
")",
"x",
"=",
"np",
".",
"sqrt",
"(",
"x",
"[",
"0",
":",
"-",
"1",
"]",
"*",
"x",
"[",
"1",
":",
"]",
")",
"z",
"=",
"z",
"[",
":",
"-",
"1",
"]",
"+",
"0.5",
"x",
"=",
"np",
".",
"ravel",
"(",
"np",
".",
"ones",
"(",
"shape",
")",
"*",
"x",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
"z",
"=",
"np",
".",
"ravel",
"(",
"np",
".",
"ones",
"(",
"shape",
")",
"*",
"z",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
")",
"return",
"np",
".",
"vstack",
"(",
"(",
"x",
",",
"z",
")",
")"
]
| Generate an N x D list of pixel center coordinates where N is
the number of pixels and D is the dimensionality of the map. | [
"Generate",
"an",
"N",
"x",
"D",
"list",
"of",
"pixel",
"center",
"coordinates",
"where",
"N",
"is",
"the",
"number",
"of",
"pixels",
"and",
"D",
"is",
"the",
"dimensionality",
"of",
"the",
"map",
"."
]
| python | train | 29.076923 |
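The broadcast-and-ravel pattern above, standalone with toy axes in place of the HEALPix object:

import numpy as np

x_edges = np.array([1.0, 10.0, 100.0])       # stand-in energy bin edges
z_edges = np.arange(4, dtype=float)          # stand-in pixel index edges
shape = (x_edges.size - 1, z_edges.size - 1)
x = np.sqrt(x_edges[:-1] * x_edges[1:])      # geometric bin centers
z = z_edges[:-1] + 0.5                       # pixel centers
xx = np.ravel(np.ones(shape) * x[:, np.newaxis])
zz = np.ravel(np.ones(shape) * z[np.newaxis, :])
coords = np.vstack((xx, zz))                 # 2 x 6 array of center coordinates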
jstitch/MambuPy | MambuPy/mambuutil.py | https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/mambuutil.py#L197-L252 | def getloansurl(idcred, *args, **kwargs):
"""Request Loans URL.
If idcred is set, you'll get a response adequate for a MambuLoan object.
If not set, you'll get a response adequate for a MambuLoans object.
See mambuloan module and pydoc for further information.
Currently implemented filter parameters:
* fullDetails
* accountState
* branchId
* centreId
* creditOfficerUsername
* limit
* offset
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future.
"""
getparams = []
if kwargs:
try:
if kwargs["fullDetails"] == True:
getparams.append("fullDetails=true")
else:
getparams.append("fullDetails=false")
except Exception as ex:
pass
try:
getparams.append("accountState=%s" % kwargs["accountState"])
except Exception as ex:
pass
try:
getparams.append("branchId=%s" % kwargs["branchId"])
except Exception as ex:
pass
try:
getparams.append("centreId=%s" % kwargs["centreId"])
except Exception as ex:
pass
try:
getparams.append("creditOfficerUsername=%s" % kwargs["creditOfficerUsername"])
except Exception as ex:
pass
try:
getparams.append("offset=%s" % kwargs["offset"])
except Exception as ex:
pass
try:
getparams.append("limit=%s" % kwargs["limit"])
except Exception as ex:
pass
idcredparam = "" if idcred == "" else "/"+idcred
url = getmambuurl(*args,**kwargs) + "loans" + idcredparam + ("" if len(getparams) == 0 else "?" + "&".join(getparams) )
return url | [
"def",
"getloansurl",
"(",
"idcred",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"getparams",
"=",
"[",
"]",
"if",
"kwargs",
":",
"try",
":",
"if",
"kwargs",
"[",
"\"fullDetails\"",
"]",
"==",
"True",
":",
"getparams",
".",
"append",
"(",
"\"fullDetails=true\"",
")",
"else",
":",
"getparams",
".",
"append",
"(",
"\"fullDetails=false\"",
")",
"except",
"Exception",
"as",
"ex",
":",
"pass",
"try",
":",
"getparams",
".",
"append",
"(",
"\"accountState=%s\"",
"%",
"kwargs",
"[",
"\"accountState\"",
"]",
")",
"except",
"Exception",
"as",
"ex",
":",
"pass",
"try",
":",
"getparams",
".",
"append",
"(",
"\"branchId=%s\"",
"%",
"kwargs",
"[",
"\"branchId\"",
"]",
")",
"except",
"Exception",
"as",
"ex",
":",
"pass",
"try",
":",
"getparams",
".",
"append",
"(",
"\"centreId=%s\"",
"%",
"kwargs",
"[",
"\"centreId\"",
"]",
")",
"except",
"Exception",
"as",
"ex",
":",
"pass",
"try",
":",
"getparams",
".",
"append",
"(",
"\"creditOfficerUsername=%s\"",
"%",
"kwargs",
"[",
"\"creditOfficerUsername\"",
"]",
")",
"except",
"Exception",
"as",
"ex",
":",
"pass",
"try",
":",
"getparams",
".",
"append",
"(",
"\"offset=%s\"",
"%",
"kwargs",
"[",
"\"offset\"",
"]",
")",
"except",
"Exception",
"as",
"ex",
":",
"pass",
"try",
":",
"getparams",
".",
"append",
"(",
"\"limit=%s\"",
"%",
"kwargs",
"[",
"\"limit\"",
"]",
")",
"except",
"Exception",
"as",
"ex",
":",
"pass",
"idcredparam",
"=",
"\"\"",
"if",
"idcred",
"==",
"\"\"",
"else",
"\"/\"",
"+",
"idcred",
"url",
"=",
"getmambuurl",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"+",
"\"loans\"",
"+",
"idcredparam",
"+",
"(",
"\"\"",
"if",
"len",
"(",
"getparams",
")",
"==",
"0",
"else",
"\"?\"",
"+",
"\"&\"",
".",
"join",
"(",
"getparams",
")",
")",
"return",
"url"
]
| Request Loans URL.
If idcred is set, you'll get a response adequate for a MambuLoan object.
If not set, you'll get a response adequate for a MambuLoans object.
See mambuloan module and pydoc for further information.
Currently implemented filter parameters:
* fullDetails
* accountState
* branchId
* centreId
* creditOfficerUsername
* limit
* offset
See Mambu official developer documentation for further details, and
info on parameters that may be implemented here in the future. | [
"Request",
"Loans",
"URL",
"."
]
| python | train | 31.821429 |
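A sketch of the URL this builds (the domain comes from getmambuurl() and your apiurl config; values here are made up):

url = getloansurl("12345", fullDetails=True, limit=10)
# -> https://<your-tenant>.mambu.com/api/loans/12345?fullDetails=true&limit=10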
hasgeek/coaster | coaster/views/decorators.py | https://github.com/hasgeek/coaster/blob/07f7eb5d5f516e22fa14fdf4dc70e0ae13ee398d/coaster/views/decorators.py#L601-L639 | def requires_permission(permission):
"""
View decorator that requires a certain permission to be present in
``current_auth.permissions`` before the view is allowed to proceed.
Aborts with ``403 Forbidden`` if the permission is not present.
The decorated view will have an ``is_available`` method that can be called
to perform the same test.
:param permission: Permission that is required. If a collection type is
provided, any one permission must be available
"""
def inner(f):
def is_available_here():
if not hasattr(current_auth, 'permissions'):
return False
elif is_collection(permission):
return bool(current_auth.permissions.intersection(permission))
else:
return permission in current_auth.permissions
def is_available(context=None):
result = is_available_here()
if result and hasattr(f, 'is_available'):
# We passed, but we're wrapping another test, so ask there as well
return f.is_available(context)
return result
@wraps(f)
def wrapper(*args, **kwargs):
add_auth_attribute('login_required', True)
if not is_available_here():
abort(403)
return f(*args, **kwargs)
wrapper.requires_permission = permission
wrapper.is_available = is_available
return wrapper
return inner | [
"def",
"requires_permission",
"(",
"permission",
")",
":",
"def",
"inner",
"(",
"f",
")",
":",
"def",
"is_available_here",
"(",
")",
":",
"if",
"not",
"hasattr",
"(",
"current_auth",
",",
"'permissions'",
")",
":",
"return",
"False",
"elif",
"is_collection",
"(",
"permission",
")",
":",
"return",
"bool",
"(",
"current_auth",
".",
"permissions",
".",
"intersection",
"(",
"permission",
")",
")",
"else",
":",
"return",
"permission",
"in",
"current_auth",
".",
"permissions",
"def",
"is_available",
"(",
"context",
"=",
"None",
")",
":",
"result",
"=",
"is_available_here",
"(",
")",
"if",
"result",
"and",
"hasattr",
"(",
"f",
",",
"'is_available'",
")",
":",
"# We passed, but we're wrapping another test, so ask there as well",
"return",
"f",
".",
"is_available",
"(",
"context",
")",
"return",
"result",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"add_auth_attribute",
"(",
"'login_required'",
",",
"True",
")",
"if",
"not",
"is_available_here",
"(",
")",
":",
"abort",
"(",
"403",
")",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"wrapper",
".",
"requires_permission",
"=",
"permission",
"wrapper",
".",
"is_available",
"=",
"is_available",
"return",
"wrapper",
"return",
"inner"
]
| View decorator that requires a certain permission to be present in
``current_auth.permissions`` before the view is allowed to proceed.
Aborts with ``403 Forbidden`` if the permission is not present.
The decorated view will have an ``is_available`` method that can be called
to perform the same test.
:param permission: Permission that is required. If a collection type is
provided, any one permission must be available | [
"View",
"decorator",
"that",
"requires",
"a",
"certain",
"permission",
"to",
"be",
"present",
"in",
"current_auth",
".",
"permissions",
"before",
"the",
"view",
"is",
"allowed",
"to",
"proceed",
".",
"Aborts",
"with",
"403",
"Forbidden",
"if",
"the",
"permission",
"is",
"not",
"present",
"."
]
| python | train | 37.179487 |
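A usage sketch on a Flask view (permission names are made up):

from flask import Flask

app = Flask(__name__)

@app.route("/secret")
@requires_permission("view-secret")  # a set such as {"admin", "editor"} means any-of
def secret():
    return "ok"

# secret.is_available() can be checked in templates to hide links that
# would 403 for the current user.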
census-instrumentation/opencensus-python | contrib/opencensus-ext-azure/opencensus/ext/azure/trace_exporter/__init__.py | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-azure/opencensus/ext/azure/trace_exporter/__init__.py#L239-L249 | def emit(self, span_datas):
"""
:type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param list of opencensus.trace.span_data.SpanData span_datas:
SpanData tuples to emit
"""
envelopes = [self.span_data_to_envelope(sd) for sd in span_datas]
result = self._transmit(envelopes)
if result > 0:
self.storage.put(envelopes, result) | [
"def",
"emit",
"(",
"self",
",",
"span_datas",
")",
":",
"envelopes",
"=",
"[",
"self",
".",
"span_data_to_envelope",
"(",
"sd",
")",
"for",
"sd",
"in",
"span_datas",
"]",
"result",
"=",
"self",
".",
"_transmit",
"(",
"envelopes",
")",
"if",
"result",
">",
"0",
":",
"self",
".",
"storage",
".",
"put",
"(",
"envelopes",
",",
"result",
")"
]
| :type span_datas: list of :class:
`~opencensus.trace.span_data.SpanData`
:param list of opencensus.trace.span_data.SpanData span_datas:
SpanData tuples to emit | [
":",
"type",
"span_datas",
":",
"list",
"of",
":",
"class",
":",
"~opencensus",
".",
"trace",
".",
"span_data",
".",
"SpanData",
":",
"param",
"list",
"of",
"opencensus",
".",
"trace",
".",
"span_data",
".",
"SpanData",
"span_datas",
":",
"SpanData",
"tuples",
"to",
"emit"
]
| python | train | 39 |
toumorokoshi/sprinter | sprinter/core/manifest.py | https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/core/manifest.py#L248-L262 | def __substitute_objects(self, value, context_dict):
"""
recursively substitute value with the context_dict
"""
if type(value) == dict:
return dict([(k, self.__substitute_objects(v, context_dict)) for k, v in value.items()])
elif type(value) == str:
try:
return value % context_dict
except KeyError:
e = sys.exc_info()[1]
logger.warn("Could not specialize %s! Error: %s" % (value, e))
return value
else:
return value | [
"def",
"__substitute_objects",
"(",
"self",
",",
"value",
",",
"context_dict",
")",
":",
"if",
"type",
"(",
"value",
")",
"==",
"dict",
":",
"return",
"dict",
"(",
"[",
"(",
"k",
",",
"self",
".",
"__substitute_objects",
"(",
"v",
",",
"context_dict",
")",
")",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
"]",
")",
"elif",
"type",
"(",
"value",
")",
"==",
"str",
":",
"try",
":",
"return",
"value",
"%",
"context_dict",
"except",
"KeyError",
":",
"e",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"logger",
".",
"warn",
"(",
"\"Could not specialize %s! Error: %s\"",
"%",
"(",
"value",
",",
"e",
")",
")",
"return",
"value",
"else",
":",
"return",
"value"
]
| recursively substitute value with the context_dict | [
"recursively",
"substitute",
"value",
"with",
"the",
"context_dict"
]
| python | train | 37.466667 |
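The same %-style substitution standalone; the method above only adds recursion over dicts and a warning instead of a crash on missing keys:

context = {"user": "deploy", "home": "/home/deploy"}
value = {"path": "%(home)s/bin", "owner": "%(user)s"}
resolved = {k: v % context for k, v in value.items()}
# {'path': '/home/deploy/bin', 'owner': 'deploy'}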
shexSpec/grammar | parsers/python/pyshexc/parser_impl/shex_annotations_and_semacts_parser.py | https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_annotations_and_semacts_parser.py#L42-L50 | def visitAnnotation(self, ctx: ShExDocParser.AnnotationContext):
""" annotation: '//' predicate (iri | literal) """
# Annotations apply to the expression, NOT the shape (!)
annot = Annotation(self.context.predicate_to_IRI(ctx.predicate()))
if ctx.iri():
annot.object = self.context.iri_to_iriref(ctx.iri())
else:
annot.object = self.context.literal_to_ObjectLiteral(ctx.literal())
self.annotations.append(annot) | [
"def",
"visitAnnotation",
"(",
"self",
",",
"ctx",
":",
"ShExDocParser",
".",
"AnnotationContext",
")",
":",
"# Annotations apply to the expression, NOT the shape (!)",
"annot",
"=",
"Annotation",
"(",
"self",
".",
"context",
".",
"predicate_to_IRI",
"(",
"ctx",
".",
"predicate",
"(",
")",
")",
")",
"if",
"ctx",
".",
"iri",
"(",
")",
":",
"annot",
".",
"object",
"=",
"self",
".",
"context",
".",
"iri_to_iriref",
"(",
"ctx",
".",
"iri",
"(",
")",
")",
"else",
":",
"annot",
".",
"object",
"=",
"self",
".",
"context",
".",
"literal_to_ObjectLiteral",
"(",
"ctx",
".",
"literal",
"(",
")",
")",
"self",
".",
"annotations",
".",
"append",
"(",
"annot",
")"
]
| annotation: '//' predicate (iri | literal) | [
"annotation",
":",
"//",
"predicate",
"(",
"iri",
"|",
"literal",
")"
]
| python | train | 52.777778 |
cakebread/yolk | yolk/pypi.py | https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/pypi.py#L288-L314 | def filter_url(pkg_type, url):
"""
Returns URL of specified file type
'source', 'egg', or 'all'
"""
bad_stuff = ["?modtime", "#md5="]
for junk in bad_stuff:
if junk in url:
url = url.split(junk)[0]
break
#pkg_spec==dev (svn)
if url.endswith("-dev"):
url = url.split("#egg=")[0]
if pkg_type == "all":
return url
elif pkg_type == "source":
valid_source_types = [".tgz", ".tar.gz", ".zip", ".tbz2", ".tar.bz2"]
for extension in valid_source_types:
if url.lower().endswith(extension):
return url
elif pkg_type == "egg":
if url.lower().endswith(".egg"):
return url | [
"def",
"filter_url",
"(",
"pkg_type",
",",
"url",
")",
":",
"bad_stuff",
"=",
"[",
"\"?modtime\"",
",",
"\"#md5=\"",
"]",
"for",
"junk",
"in",
"bad_stuff",
":",
"if",
"junk",
"in",
"url",
":",
"url",
"=",
"url",
".",
"split",
"(",
"junk",
")",
"[",
"0",
"]",
"break",
"#pkg_spec==dev (svn)",
"if",
"url",
".",
"endswith",
"(",
"\"-dev\"",
")",
":",
"url",
"=",
"url",
".",
"split",
"(",
"\"#egg=\"",
")",
"[",
"0",
"]",
"if",
"pkg_type",
"==",
"\"all\"",
":",
"return",
"url",
"elif",
"pkg_type",
"==",
"\"source\"",
":",
"valid_source_types",
"=",
"[",
"\".tgz\"",
",",
"\".tar.gz\"",
",",
"\".zip\"",
",",
"\".tbz2\"",
",",
"\".tar.bz2\"",
"]",
"for",
"extension",
"in",
"valid_source_types",
":",
"if",
"url",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"extension",
")",
":",
"return",
"url",
"elif",
"pkg_type",
"==",
"\"egg\"",
":",
"if",
"url",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\".egg\"",
")",
":",
"return",
"url"
]
| Returns URL of specified file type
'source', 'egg', or 'all' | [
"Returns",
"URL",
"of",
"specified",
"file",
"type",
"source",
"egg",
"or",
"all"
]
| python | train | 25.666667 |
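Two toy calls (URLs are made up):

print(filter_url("source", "https://example.org/pkg-1.0.tar.gz#md5=abc123"))
# -> https://example.org/pkg-1.0.tar.gz  (junk stripped, extension accepted)
print(filter_url("egg", "https://example.org/pkg-1.0.tar.gz"))
# -> None (no matching extension, so the function falls through)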
shimpe/pyvectortween | vectortween/PolarAnimation.py | https://github.com/shimpe/pyvectortween/blob/aff071180474739060ec2d3102c39c8e73510988/vectortween/PolarAnimation.py#L151-L160 | def make_frame(self, frame, birthframe, startframe, stopframe, deathframe, noiseframe=None):
"""
:param frame: current frame
:param birthframe: frame where this animation starts returning something other than None
:param startframe: frame where animation starts to evolve
:param stopframe: frame where animation is completed
:param deathframe: frame where animation starts to return None
:return:
"""
return self.anim.make_frame(frame, birthframe, startframe, stopframe, deathframe, noiseframe) | [
"def",
"make_frame",
"(",
"self",
",",
"frame",
",",
"birthframe",
",",
"startframe",
",",
"stopframe",
",",
"deathframe",
",",
"noiseframe",
"=",
"None",
")",
":",
"return",
"self",
".",
"anim",
".",
"make_frame",
"(",
"frame",
",",
"birthframe",
",",
"startframe",
",",
"stopframe",
",",
"deathframe",
",",
"noiseframe",
")"
]
| :param frame: current frame
:param birthframe: frame where this animation starts returning something other than None
:param startframe: frame where animation starts to evolve
:param stopframe: frame where animation is completed
:param deathframe: frame where animation starts to return None
:return: | [
":",
"param",
"frame",
":",
"current",
"frame",
":",
"param",
"birthframe",
":",
"frame",
"where",
"this",
"animation",
"starts",
"returning",
"something",
"other",
"than",
"None",
":",
"param",
"startframe",
":",
"frame",
"where",
"animation",
"starts",
"to",
"evolve",
":",
"param",
"stopframe",
":",
"frame",
"where",
"animation",
"is",
"completed",
":",
"param",
"deathframe",
":",
"frame",
"where",
"animation",
"starts",
"to",
"return",
"None",
":",
"return",
":"
]
| python | train | 55.9 |
globality-corp/microcosm-postgres | microcosm_postgres/encryption/store.py | https://github.com/globality-corp/microcosm-postgres/blob/43dd793b1fc9b84e4056700f350e79e0df5ff501/microcosm_postgres/encryption/store.py#L30-L73 | def update(self, identifier, new_instance):
"""
Update an encryptable field, make sure that:
* We won't change the encryption context key
* The new value is going to be encrypted
        * The returned instance.plaintext is the updated one
Note: Will expunge the returned instance
"""
old_instance = self.retrieve(identifier)
old_encrypted_identifier = old_instance.encrypted_identifier
if (
new_instance.encryption_context_key and
old_instance.encryption_context_key != new_instance.encryption_context_key
):
raise ValueError("Cannot change encryption context key")
# If updating a non encrypted field - skip
if new_instance.plaintext is None and new_instance.encrypted_relationship is None:
result = super().update(identifier, new_instance)
self.expunge(result)
return result
# Verify that the new instance is encrypted if it should be
# If it's not - encrypt it with the old key
# If it is - save the expected new plaintext
if new_instance.plaintext is not None:
expected_new_plaintext = new_instance.plaintext
new_instance = self.reencrypt_instance(new_instance, old_instance.encryption_context_key)
else:
decrypt, expected_new_plaintext = decrypt_instance(new_instance)
result = super().update(identifier, new_instance)
# Delete the old encrypted value (instead of using sqlalchemy cascade)
if old_encrypted_identifier != new_instance.encrypted_identifier:
self.encrypted_store.delete(old_encrypted_identifier)
# Update the return result, super().update() won't do it.
self.expunge(result)
result.plaintext = expected_new_plaintext
return result | [
"def",
"update",
"(",
"self",
",",
"identifier",
",",
"new_instance",
")",
":",
"old_instance",
"=",
"self",
".",
"retrieve",
"(",
"identifier",
")",
"old_encrypted_identifier",
"=",
"old_instance",
".",
"encrypted_identifier",
"if",
"(",
"new_instance",
".",
"encryption_context_key",
"and",
"old_instance",
".",
"encryption_context_key",
"!=",
"new_instance",
".",
"encryption_context_key",
")",
":",
"raise",
"ValueError",
"(",
"\"Cannot change encryption context key\"",
")",
"# If updating a non encrypted field - skip",
"if",
"new_instance",
".",
"plaintext",
"is",
"None",
"and",
"new_instance",
".",
"encrypted_relationship",
"is",
"None",
":",
"result",
"=",
"super",
"(",
")",
".",
"update",
"(",
"identifier",
",",
"new_instance",
")",
"self",
".",
"expunge",
"(",
"result",
")",
"return",
"result",
"# Verify that the new instance is encrypted if it should be",
"# If it's not - encrypt it with the old key",
"# If it is - save the expected new plaintext",
"if",
"new_instance",
".",
"plaintext",
"is",
"not",
"None",
":",
"expected_new_plaintext",
"=",
"new_instance",
".",
"plaintext",
"new_instance",
"=",
"self",
".",
"reencrypt_instance",
"(",
"new_instance",
",",
"old_instance",
".",
"encryption_context_key",
")",
"else",
":",
"decrypt",
",",
"expected_new_plaintext",
"=",
"decrypt_instance",
"(",
"new_instance",
")",
"result",
"=",
"super",
"(",
")",
".",
"update",
"(",
"identifier",
",",
"new_instance",
")",
"# Delete the old encrypted value (instead of using sqlalchemy cascade)",
"if",
"old_encrypted_identifier",
"!=",
"new_instance",
".",
"encrypted_identifier",
":",
"self",
".",
"encrypted_store",
".",
"delete",
"(",
"old_encrypted_identifier",
")",
"# Update the return result, super().update() won't do it.",
"self",
".",
"expunge",
"(",
"result",
")",
"result",
".",
"plaintext",
"=",
"expected_new_plaintext",
"return",
"result"
]
| Update an encryptable field, make sure that:
* We won't change the encryption context key
* The new value is going to be encrypted
        * The returned instance.plaintext is the updated one
Note: Will expunge the returned instance | [
"Update",
"an",
"encryptable",
"field",
"make",
"sure",
"that",
":",
"*",
"We",
"won",
"t",
"change",
"the",
"encryption",
"context",
"key",
"*",
"The",
"new",
"value",
"is",
"going",
"to",
"be",
"encrypted",
"*",
"The",
"return",
"instance",
".",
"plaintext",
"is",
"the",
"updated",
"one"
]
| python | train | 41.477273 |
aleontiev/dj | dj/generator.py | https://github.com/aleontiev/dj/blob/0612d442fdd8d472aea56466568b9857556ecb51/dj/generator.py#L68-L143 | def merge(self):
"""Merges the rendered blueprint into the application."""
temp_dir = self.temp_dir
app_dir = self.application.directory
for root, dirs, files in os.walk(temp_dir):
for directory in dirs:
directory = os.path.join(root, directory)
directory = directory.replace(temp_dir, app_dir, 1)
try:
os.mkdir(directory)
except OSError:
pass
for file in files:
source = os.path.join(root, file)
target = source.replace(temp_dir, app_dir, 1)
relative_target = target.replace(app_dir, '.')
action = 'r'
if (
os.path.exists(target)
and not filecmp.cmp(source, target, shallow=False)
and os.stat(target).st_size > 0
):
# target exists, is not empty, and does not
# match source
if target.endswith('__init__.py'):
# default merge __init__.py files
# if non-empty, these should only
                        # contain imports from submodules
action = 'm'
elif target.endswith('base.py'):
# default skip base.py files
# these should be extended by the developer
action = 's'
else:
default = 'm'
action = click.prompt(
style.prompt(
'%s already exists, '
'[r]eplace, [s]kip, or [m]erge?' % (
relative_target
),
),
default=style.default(default)
) if self.interactive else default
action = click.unstyle(action).lower()
if action not in {'r', 'm', 's'}:
action = default
if action == 's':
self.stdout.write(
'? %s' % style.white(relative_target),
fg='yellow'
)
continue
if action == 'r':
with open(source, 'r') as source_file:
with open(target, 'w') as target_file:
target_file.write(source_file.read())
self.stdout.write(
style.green(
'+ %s' % style.white(relative_target)
)
)
if action == 'm':
with open(target, 'r') as target_file:
with open(source, 'r') as source_file:
merged = merge(
target_file.read(),
source_file.read()
)
with open(target, 'w') as target_file:
target_file.write(merged)
self.stdout.write(
style.yellow('> %s' % style.white(relative_target))
) | [
"def",
"merge",
"(",
"self",
")",
":",
"temp_dir",
"=",
"self",
".",
"temp_dir",
"app_dir",
"=",
"self",
".",
"application",
".",
"directory",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"temp_dir",
")",
":",
"for",
"directory",
"in",
"dirs",
":",
"directory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"directory",
")",
"directory",
"=",
"directory",
".",
"replace",
"(",
"temp_dir",
",",
"app_dir",
",",
"1",
")",
"try",
":",
"os",
".",
"mkdir",
"(",
"directory",
")",
"except",
"OSError",
":",
"pass",
"for",
"file",
"in",
"files",
":",
"source",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"file",
")",
"target",
"=",
"source",
".",
"replace",
"(",
"temp_dir",
",",
"app_dir",
",",
"1",
")",
"relative_target",
"=",
"target",
".",
"replace",
"(",
"app_dir",
",",
"'.'",
")",
"action",
"=",
"'r'",
"if",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"target",
")",
"and",
"not",
"filecmp",
".",
"cmp",
"(",
"source",
",",
"target",
",",
"shallow",
"=",
"False",
")",
"and",
"os",
".",
"stat",
"(",
"target",
")",
".",
"st_size",
">",
"0",
")",
":",
"# target exists, is not empty, and does not",
"# match source",
"if",
"target",
".",
"endswith",
"(",
"'__init__.py'",
")",
":",
"# default merge __init__.py files",
"# if non-empty, these should only",
"# contain imports from submoduiles",
"action",
"=",
"'m'",
"elif",
"target",
".",
"endswith",
"(",
"'base.py'",
")",
":",
"# default skip base.py files",
"# these should be extended by the developer",
"action",
"=",
"'s'",
"else",
":",
"default",
"=",
"'m'",
"action",
"=",
"click",
".",
"prompt",
"(",
"style",
".",
"prompt",
"(",
"'%s already exists, '",
"'[r]eplace, [s]kip, or [m]erge?'",
"%",
"(",
"relative_target",
")",
",",
")",
",",
"default",
"=",
"style",
".",
"default",
"(",
"default",
")",
")",
"if",
"self",
".",
"interactive",
"else",
"default",
"action",
"=",
"click",
".",
"unstyle",
"(",
"action",
")",
".",
"lower",
"(",
")",
"if",
"action",
"not",
"in",
"{",
"'r'",
",",
"'m'",
",",
"'s'",
"}",
":",
"action",
"=",
"default",
"if",
"action",
"==",
"'s'",
":",
"self",
".",
"stdout",
".",
"write",
"(",
"'? %s'",
"%",
"style",
".",
"white",
"(",
"relative_target",
")",
",",
"fg",
"=",
"'yellow'",
")",
"continue",
"if",
"action",
"==",
"'r'",
":",
"with",
"open",
"(",
"source",
",",
"'r'",
")",
"as",
"source_file",
":",
"with",
"open",
"(",
"target",
",",
"'w'",
")",
"as",
"target_file",
":",
"target_file",
".",
"write",
"(",
"source_file",
".",
"read",
"(",
")",
")",
"self",
".",
"stdout",
".",
"write",
"(",
"style",
".",
"green",
"(",
"'+ %s'",
"%",
"style",
".",
"white",
"(",
"relative_target",
")",
")",
")",
"if",
"action",
"==",
"'m'",
":",
"with",
"open",
"(",
"target",
",",
"'r'",
")",
"as",
"target_file",
":",
"with",
"open",
"(",
"source",
",",
"'r'",
")",
"as",
"source_file",
":",
"merged",
"=",
"merge",
"(",
"target_file",
".",
"read",
"(",
")",
",",
"source_file",
".",
"read",
"(",
")",
")",
"with",
"open",
"(",
"target",
",",
"'w'",
")",
"as",
"target_file",
":",
"target_file",
".",
"write",
"(",
"merged",
")",
"self",
".",
"stdout",
".",
"write",
"(",
"style",
".",
"yellow",
"(",
"'> %s'",
"%",
"style",
".",
"white",
"(",
"relative_target",
")",
")",
")"
]
| Merges the rendered blueprint into the application. | [
"Merges",
"the",
"rendered",
"blueprint",
"into",
"the",
"application",
"."
]
| python | train | 43.789474 |
s1s1ty/py-jsonq | pyjsonq/query.py | https://github.com/s1s1ty/py-jsonq/blob/9625597a2578bddcbed4e540174d5253b1fc3b75/pyjsonq/query.py#L332-L345 | def sum(self, property):
"""Getting the sum according to the given property
:@param property
:@type property: string
:@return int/float
"""
self.__prepare()
total = 0
for i in self._json_data:
total += i.get(property)
return total | [
"def",
"sum",
"(",
"self",
",",
"property",
")",
":",
"self",
".",
"__prepare",
"(",
")",
"total",
"=",
"0",
"for",
"i",
"in",
"self",
".",
"_json_data",
":",
"total",
"+=",
"i",
".",
"get",
"(",
"property",
")",
"return",
"total"
]
| Getting the sum according to the given property
:@param property
:@type property: string
:@return int/float | [
"Getting",
"the",
"sum",
"according",
"to",
"the",
"given",
"property"
]
| python | train | 21.714286 |
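A usage sketch (file name and keys are made up; at() selects the node the query runs on):

from pyjsonq import JsonQ

total = JsonQ("./items.json").at("products").sum("price")
# sums the "price" property across every object under "products"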
5monkeys/django-bananas | bananas/admin/api/mixins.py | https://github.com/5monkeys/django-bananas/blob/cfd318c737f6c4580036c13d2acf32bca96654bf/bananas/admin/api/mixins.py#L78-L88 | def get_url_name(self, action_url_name="list"):
"""
Get full namespaced url name to use for reverse()
"""
url_name = "{}-{}".format(self.basename, action_url_name)
namespace = self.request.resolver_match.namespace
if namespace:
url_name = "{}:{}".format(namespace, url_name)
return url_name | [
"def",
"get_url_name",
"(",
"self",
",",
"action_url_name",
"=",
"\"list\"",
")",
":",
"url_name",
"=",
"\"{}-{}\"",
".",
"format",
"(",
"self",
".",
"basename",
",",
"action_url_name",
")",
"namespace",
"=",
"self",
".",
"request",
".",
"resolver_match",
".",
"namespace",
"if",
"namespace",
":",
"url_name",
"=",
"\"{}:{}\"",
".",
"format",
"(",
"namespace",
",",
"url_name",
")",
"return",
"url_name"
]
| Get full namespaced url name to use for reverse() | [
"Get",
"full",
"namespaced",
"url",
"name",
"to",
"use",
"for",
"reverse",
"()"
]
| python | test | 31.818182 |
NASA-AMMOS/AIT-Core | ait/core/bsc.py | https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/bsc.py#L480-L558 | def add_logger(self, name, address, conn_type, log_dir_path=None, **kwargs):
''' Add a new stream capturer to the manager.
Add a new stream capturer to the manager with the provided configuration
details. If an existing capturer is monitoring the same address the
new handler will be added to it.
Args:
name:
A string defining the new capturer's name.
address:
A tuple containing address data for the capturer. Check the
:class:`SocketStreamCapturer` documentation for what is
required.
conn_type:
A string defining the connection type. Check the
:class:`SocketStreamCapturer` documentation for a list of valid
options.
log_dir_path:
An optional path defining the directory where the
capturer should write its files. If this isn't provided the root
log directory from the manager configuration is used.
'''
capture_handler_conf = kwargs
if not log_dir_path:
log_dir_path = self._mngr_conf['root_log_directory']
log_dir_path = os.path.normpath(os.path.expanduser(log_dir_path))
capture_handler_conf['log_dir'] = log_dir_path
capture_handler_conf['name'] = name
if 'rotate_log' not in capture_handler_conf:
capture_handler_conf['rotate_log'] = True
transforms = []
if 'pre_write_transforms' in capture_handler_conf:
for transform in capture_handler_conf['pre_write_transforms']:
if isinstance(transform, str):
if globals().has_key(transform):
transforms.append(globals().get(transform))
else:
msg = (
'Unable to load data transformation '
'"{}" for handler "{}"'
).format(
transform,
capture_handler_conf['name']
)
log.warn(msg)
elif hasattr(transform, '__call__'):
transforms.append(transform)
else:
msg = (
'Unable to determine how to load data transform "{}"'
).format(transform)
log.warn(msg)
capture_handler_conf['pre_write_transforms'] = transforms
address_key = str(address)
if address_key in self._stream_capturers:
capturer = self._stream_capturers[address_key][0]
capturer.add_handler(capture_handler_conf)
return
socket_logger = SocketStreamCapturer(capture_handler_conf,
address,
conn_type)
greenlet = gevent.spawn(socket_logger.socket_monitor_loop)
self._stream_capturers[address_key] = (
socket_logger,
greenlet
)
self._pool.add(greenlet) | [
"def",
"add_logger",
"(",
"self",
",",
"name",
",",
"address",
",",
"conn_type",
",",
"log_dir_path",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"capture_handler_conf",
"=",
"kwargs",
"if",
"not",
"log_dir_path",
":",
"log_dir_path",
"=",
"self",
".",
"_mngr_conf",
"[",
"'root_log_directory'",
"]",
"log_dir_path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"log_dir_path",
")",
")",
"capture_handler_conf",
"[",
"'log_dir'",
"]",
"=",
"log_dir_path",
"capture_handler_conf",
"[",
"'name'",
"]",
"=",
"name",
"if",
"'rotate_log'",
"not",
"in",
"capture_handler_conf",
":",
"capture_handler_conf",
"[",
"'rotate_log'",
"]",
"=",
"True",
"transforms",
"=",
"[",
"]",
"if",
"'pre_write_transforms'",
"in",
"capture_handler_conf",
":",
"for",
"transform",
"in",
"capture_handler_conf",
"[",
"'pre_write_transforms'",
"]",
":",
"if",
"isinstance",
"(",
"transform",
",",
"str",
")",
":",
"if",
"globals",
"(",
")",
".",
"has_key",
"(",
"transform",
")",
":",
"transforms",
".",
"append",
"(",
"globals",
"(",
")",
".",
"get",
"(",
"transform",
")",
")",
"else",
":",
"msg",
"=",
"(",
"'Unable to load data transformation '",
"'\"{}\" for handler \"{}\"'",
")",
".",
"format",
"(",
"transform",
",",
"capture_handler_conf",
"[",
"'name'",
"]",
")",
"log",
".",
"warn",
"(",
"msg",
")",
"elif",
"hasattr",
"(",
"transform",
",",
"'__call__'",
")",
":",
"transforms",
".",
"append",
"(",
"transform",
")",
"else",
":",
"msg",
"=",
"(",
"'Unable to determine how to load data transform \"{}\"'",
")",
".",
"format",
"(",
"transform",
")",
"log",
".",
"warn",
"(",
"msg",
")",
"capture_handler_conf",
"[",
"'pre_write_transforms'",
"]",
"=",
"transforms",
"address_key",
"=",
"str",
"(",
"address",
")",
"if",
"address_key",
"in",
"self",
".",
"_stream_capturers",
":",
"capturer",
"=",
"self",
".",
"_stream_capturers",
"[",
"address_key",
"]",
"[",
"0",
"]",
"capturer",
".",
"add_handler",
"(",
"capture_handler_conf",
")",
"return",
"socket_logger",
"=",
"SocketStreamCapturer",
"(",
"capture_handler_conf",
",",
"address",
",",
"conn_type",
")",
"greenlet",
"=",
"gevent",
".",
"spawn",
"(",
"socket_logger",
".",
"socket_monitor_loop",
")",
"self",
".",
"_stream_capturers",
"[",
"address_key",
"]",
"=",
"(",
"socket_logger",
",",
"greenlet",
")",
"self",
".",
"_pool",
".",
"add",
"(",
"greenlet",
")"
]
| Add a new stream capturer to the manager.
Add a new stream capturer to the manager with the provided configuration
details. If an existing capturer is monitoring the same address the
new handler will be added to it.
Args:
name:
A string defining the new capturer's name.
address:
A tuple containing address data for the capturer. Check the
:class:`SocketStreamCapturer` documentation for what is
required.
conn_type:
A string defining the connection type. Check the
:class:`SocketStreamCapturer` documentation for a list of valid
options.
log_dir_path:
An optional path defining the directory where the
capturer should write its files. If this isn't provided the root
log directory from the manager configuration is used. | [
"Add",
"a",
"new",
"stream",
"capturer",
"to",
"the",
"manager",
"."
]
| python | train | 38.974684 |
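A call sketch (assumes mgr is an already-constructed capture manager; the address tuple and 'udp' connection type are illustrative -- see the SocketStreamCapturer docs referenced above for the accepted values):

mgr.add_logger(
    name="telemetry",
    address=("", 3076),   # made-up port
    conn_type="udp",
    rotate_log=True,
)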
collectiveacuity/labPack | labpack/platforms/docker.py | https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/docker.py#L591-L617 | def enter(self, container_alias):
'''
a method to open up a terminal inside a running container
:param container_alias: string with name or id of container
:return: None
'''
title = '%s.enter' % self.__class__.__name__
# validate inputs
input_fields = {
'container_alias': container_alias
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# compose system command
from os import system
sys_cmd = 'docker exec -it %s sh' % container_alias
if self.localhost.os.sysname in ('Windows'):
sys_cmd = 'winpty %s' % sys_cmd
# open up terminal
system(sys_cmd) | [
"def",
"enter",
"(",
"self",
",",
"container_alias",
")",
":",
"title",
"=",
"'%s.enter'",
"%",
"self",
".",
"__class__",
".",
"__name__",
"# validate inputs\r",
"input_fields",
"=",
"{",
"'container_alias'",
":",
"container_alias",
"}",
"for",
"key",
",",
"value",
"in",
"input_fields",
".",
"items",
"(",
")",
":",
"object_title",
"=",
"'%s(%s=%s)'",
"%",
"(",
"title",
",",
"key",
",",
"str",
"(",
"value",
")",
")",
"self",
".",
"fields",
".",
"validate",
"(",
"value",
",",
"'.%s'",
"%",
"key",
",",
"object_title",
")",
"# compose system command\r",
"from",
"os",
"import",
"system",
"sys_cmd",
"=",
"'docker exec -it %s sh'",
"%",
"container_alias",
"if",
"self",
".",
"localhost",
".",
"os",
".",
"sysname",
"in",
"(",
"'Windows'",
")",
":",
"sys_cmd",
"=",
"'winpty %s'",
"%",
"sys_cmd",
"# open up terminal\r",
"system",
"(",
"sys_cmd",
")"
]
| a method to open up a terminal inside a running container
:param container_alias: string with name or id of container
:return: None | [
"a",
"method",
"to",
"open",
"up",
"a",
"terminal",
"inside",
"a",
"running",
"container",
":",
"param",
"container_alias",
":",
"string",
"with",
"name",
"or",
"id",
"of",
"container",
":",
"return",
":",
"None"
]
| python | train | 31 |
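A call sketch for enter() (the instance name and container alias are made up; the method just shells out):

docker_session.enter("flask-app")
# equivalent shell: docker exec -it flask-app sh
# (prefixed with winpty when localhost.os.sysname is 'Windows')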
moralrecordings/mrcrowbar | mrcrowbar/ansi.py | https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/ansi.py#L155-L221 | def format_pixels( top, bottom, reset=True, repeat=1 ):
"""Return the ANSI escape sequence to render two vertically-stacked pixels as a
single monospace character.
top
Top colour to use. Accepted types: None, int (xterm
palette ID), tuple (RGB, RGBA), Colour
bottom
Bottom colour to use. Accepted types: None, int (xterm
palette ID), tuple (RGB, RGBA), Colour
reset
Reset the formatting at the end (default: True)
repeat
Number of horizontal pixels to render (default: 1)
"""
top_src = None
if isinstance( top, int ):
top_src = top
else:
top_rgba = colour.normalise_rgba( top )
if top_rgba[3] != 0:
top_src = top_rgba
bottom_src = None
if isinstance( bottom, int ):
bottom_src = bottom
else:
bottom_rgba = colour.normalise_rgba( bottom )
if bottom_rgba[3] != 0:
bottom_src = bottom_rgba
# short circuit for empty pixel
if (top_src is None) and (bottom_src is None):
return ' '*repeat
string = '▀'*repeat;
colour_format = []
if top_src == bottom_src:
string = '█'*repeat
elif (top_src is None) and (bottom_src is not None):
string = '▄'*repeat
if (top_src is None) and (bottom_src is not None):
if isinstance( bottom_src, int ):
colour_format.append( ANSI_FORMAT_FOREGROUND_XTERM_CMD.format( bottom_src ) )
else:
colour_format.append( ANSI_FORMAT_FOREGROUND_CMD.format( *bottom_src[:3] ) )
else:
if isinstance( top_src, int ):
colour_format.append( ANSI_FORMAT_FOREGROUND_XTERM_CMD.format( top_src ) )
else:
colour_format.append( ANSI_FORMAT_FOREGROUND_CMD.format( *top_src[:3] ) )
if top_src is not None and bottom_src is not None and top_src != bottom_src:
if isinstance( top_src, int ):
colour_format.append( ANSI_FORMAT_BACKGROUND_XTERM_CMD.format( bottom_src ) )
else:
colour_format.append( ANSI_FORMAT_BACKGROUND_CMD.format( *bottom_src[:3] ) )
colour_format = ANSI_FORMAT_BASE.format( ';'.join( colour_format ) )
reset_format = '' if not reset else ANSI_FORMAT_RESET
return '{}{}{}'.format( colour_format, string, reset_format ) | [
"def",
"format_pixels",
"(",
"top",
",",
"bottom",
",",
"reset",
"=",
"True",
",",
"repeat",
"=",
"1",
")",
":",
"top_src",
"=",
"None",
"if",
"isinstance",
"(",
"top",
",",
"int",
")",
":",
"top_src",
"=",
"top",
"else",
":",
"top_rgba",
"=",
"colour",
".",
"normalise_rgba",
"(",
"top",
")",
"if",
"top_rgba",
"[",
"3",
"]",
"!=",
"0",
":",
"top_src",
"=",
"top_rgba",
"bottom_src",
"=",
"None",
"if",
"isinstance",
"(",
"bottom",
",",
"int",
")",
":",
"bottom_src",
"=",
"bottom",
"else",
":",
"bottom_rgba",
"=",
"colour",
".",
"normalise_rgba",
"(",
"bottom",
")",
"if",
"bottom_rgba",
"[",
"3",
"]",
"!=",
"0",
":",
"bottom_src",
"=",
"bottom_rgba",
"# short circuit for empty pixel",
"if",
"(",
"top_src",
"is",
"None",
")",
"and",
"(",
"bottom_src",
"is",
"None",
")",
":",
"return",
"' '",
"*",
"repeat",
"string",
"=",
"'▀'*r",
"e",
"peat;",
"colour_format",
"=",
"[",
"]",
"if",
"top_src",
"==",
"bottom_src",
":",
"string",
"=",
"'█'*r",
"e",
"peat",
"elif",
"(",
"top_src",
"is",
"None",
")",
"and",
"(",
"bottom_src",
"is",
"not",
"None",
")",
":",
"string",
"=",
"'▄'*r",
"e",
"peat",
"if",
"(",
"top_src",
"is",
"None",
")",
"and",
"(",
"bottom_src",
"is",
"not",
"None",
")",
":",
"if",
"isinstance",
"(",
"bottom_src",
",",
"int",
")",
":",
"colour_format",
".",
"append",
"(",
"ANSI_FORMAT_FOREGROUND_XTERM_CMD",
".",
"format",
"(",
"bottom_src",
")",
")",
"else",
":",
"colour_format",
".",
"append",
"(",
"ANSI_FORMAT_FOREGROUND_CMD",
".",
"format",
"(",
"*",
"bottom_src",
"[",
":",
"3",
"]",
")",
")",
"else",
":",
"if",
"isinstance",
"(",
"top_src",
",",
"int",
")",
":",
"colour_format",
".",
"append",
"(",
"ANSI_FORMAT_FOREGROUND_XTERM_CMD",
".",
"format",
"(",
"top_src",
")",
")",
"else",
":",
"colour_format",
".",
"append",
"(",
"ANSI_FORMAT_FOREGROUND_CMD",
".",
"format",
"(",
"*",
"top_src",
"[",
":",
"3",
"]",
")",
")",
"if",
"top_src",
"is",
"not",
"None",
"and",
"bottom_src",
"is",
"not",
"None",
"and",
"top_src",
"!=",
"bottom_src",
":",
"if",
"isinstance",
"(",
"top_src",
",",
"int",
")",
":",
"colour_format",
".",
"append",
"(",
"ANSI_FORMAT_BACKGROUND_XTERM_CMD",
".",
"format",
"(",
"bottom_src",
")",
")",
"else",
":",
"colour_format",
".",
"append",
"(",
"ANSI_FORMAT_BACKGROUND_CMD",
".",
"format",
"(",
"*",
"bottom_src",
"[",
":",
"3",
"]",
")",
")",
"colour_format",
"=",
"ANSI_FORMAT_BASE",
".",
"format",
"(",
"';'",
".",
"join",
"(",
"colour_format",
")",
")",
"reset_format",
"=",
"''",
"if",
"not",
"reset",
"else",
"ANSI_FORMAT_RESET",
"return",
"'{}{}{}'",
".",
"format",
"(",
"colour_format",
",",
"string",
",",
"reset_format",
")"
]
| Return the ANSI escape sequence to render two vertically-stacked pixels as a
single monospace character.
top
Top colour to use. Accepted types: None, int (xterm
palette ID), tuple (RGB, RGBA), Colour
bottom
Bottom colour to use. Accepted types: None, int (xterm
palette ID), tuple (RGB, RGBA), Colour
reset
Reset the formatting at the end (default: True)
repeat
Number of horizontal pixels to render (default: 1) | [
"Return",
"the",
"ANSI",
"escape",
"sequence",
"to",
"render",
"two",
"vertically",
"-",
"stacked",
"pixels",
"as",
"a",
"single",
"monospace",
"character",
"."
]
| python | train | 33.507463 |
dlecocq/nsq-py | nsq/checker.py | https://github.com/dlecocq/nsq-py/blob/3ecacf6ab7719d38031179277113d875554a0c16/nsq/checker.py#L40-L43 | def callback(self):
'''Run the callback'''
self._callback(*self._args, **self._kwargs)
self._last_checked = time.time() | [
"def",
"callback",
"(",
"self",
")",
":",
"self",
".",
"_callback",
"(",
"*",
"self",
".",
"_args",
",",
"*",
"*",
"self",
".",
"_kwargs",
")",
"self",
".",
"_last_checked",
"=",
"time",
".",
"time",
"(",
")"
]
| Run the callback | [
"Run",
"the",
"callback"
]
| python | train | 35 |
joshspeagle/dynesty | dynesty/nestedsamplers.py | https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/nestedsamplers.py#L570-L587 | def propose_unif(self):
"""Propose a new live point by sampling *uniformly* within
the union of ellipsoids."""
while True:
# Sample a point from the union of ellipsoids.
# Returns the point `u`, ellipsoid index `idx`, and number of
# overlapping ellipsoids `q` at position `u`.
u, idx, q = self.mell.sample(rstate=self.rstate, return_q=True)
# Check if the point is within the unit cube.
if unitcheck(u, self.nonperiodic):
# Accept the point with probability 1/q to account for
# overlapping ellipsoids.
if q == 1 or self.rstate.rand() < 1.0 / q:
break # if successful, we're done!
return u, self.mell.ells[idx].axes | [
"def",
"propose_unif",
"(",
"self",
")",
":",
"while",
"True",
":",
"# Sample a point from the union of ellipsoids.",
"# Returns the point `u`, ellipsoid index `idx`, and number of",
"# overlapping ellipsoids `q` at position `u`.",
"u",
",",
"idx",
",",
"q",
"=",
"self",
".",
"mell",
".",
"sample",
"(",
"rstate",
"=",
"self",
".",
"rstate",
",",
"return_q",
"=",
"True",
")",
"# Check if the point is within the unit cube.",
"if",
"unitcheck",
"(",
"u",
",",
"self",
".",
"nonperiodic",
")",
":",
"# Accept the point with probability 1/q to account for",
"# overlapping ellipsoids.",
"if",
"q",
"==",
"1",
"or",
"self",
".",
"rstate",
".",
"rand",
"(",
")",
"<",
"1.0",
"/",
"q",
":",
"break",
"# if successful, we're done!",
"return",
"u",
",",
"self",
".",
"mell",
".",
"ells",
"[",
"idx",
"]",
".",
"axes"
]
| Propose a new live point by sampling *uniformly* within
the union of ellipsoids. | [
"Propose",
"a",
"new",
"live",
"point",
"by",
"sampling",
"*",
"uniformly",
"*",
"within",
"the",
"union",
"of",
"ellipsoids",
"."
]
| python | train | 43.055556 |
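The acceptance test rand() < 1/q above corrects for over-counting when a sampled point falls inside q overlapping ellipsoids. A toy demonstration of the same correction with two equal-length overlapping 1-D intervals standing in for ellipsoids (the real sampler weights the region choice by volume; equal lengths keep this toy uniform):

import random

intervals = [(0.0, 0.6), (0.4, 1.0)]  # overlap on [0.4, 0.6]

def sample_union():
    while True:
        lo, hi = random.choice(intervals)              # pick one region
        u = random.uniform(lo, hi)                     # sample within it
        q = sum(a <= u <= b for a, b in intervals)     # how many regions contain u
        if random.random() < 1.0 / q:                  # accept with probability 1/q
            return u                                   # uniform over the union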
timeyyy/apptools | peasoup/peasoup.py | https://github.com/timeyyy/apptools/blob/d3c0f324b0c2689c35f5601348276f4efd6cb240/peasoup/peasoup.py#L446-L489 | def set_windows_permissions(filename):
'''
    At least on Windows 7, if a file is created on an Admin account,
    other users will not be given execute or full control.
    However, if a user creates the file himself it will work...
    So just always change permissions after creating a file on Windows.
Change the permissions for Allusers of the application
The Everyone Group
Full access
http://timgolden.me.uk/python/win32_how_do_i/add-security-to-a-file.html
'''
#Todo rename this to allow_all, also make international not just for english..
if os.name == 'nt':
try:
everyone, domain, type = win32security.LookupAccountName(
"", "Everyone")
except Exception:
            # Todo fails on non-English language systems ... FU WINDOWS
# Just allow permission for the current user then...
everyone, domain, type = win32security.LookupAccountName ("", win32api.GetUserName())
# ~ user, domain, type = win32security.LookupAccountName ("", win32api.GetUserName())
#~ userx, domain, type = win32security.LookupAccountName ("", "User")
#~ usery, domain, type = win32security.LookupAccountName ("", "User Y")
sd = win32security.GetFileSecurity(
filename,
win32security.DACL_SECURITY_INFORMATION)
# instead of dacl = win32security.ACL()
dacl = sd.GetSecurityDescriptorDacl()
#~ dacl.AddAccessAllowedAce(win32security.ACL_REVISION, con.FILE_GENERIC_READ | con.FILE_GENERIC_WRITE, everyone)
#~ dacl.AddAccessAllowedAce(win32security.ACL_REVISION, con.FILE_ALL_ACCESS, user)
dacl.AddAccessAllowedAce(
win32security.ACL_REVISION,
con.FILE_ALL_ACCESS,
everyone)
sd.SetSecurityDescriptorDacl(1, dacl, 0) # may not be necessary
win32security.SetFileSecurity(
filename,
win32security.DACL_SECURITY_INFORMATION,
sd) | [
"def",
"set_windows_permissions",
"(",
"filename",
")",
":",
"#Todo rename this to allow_all, also make international not just for english..",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"try",
":",
"everyone",
",",
"domain",
",",
"type",
"=",
"win32security",
".",
"LookupAccountName",
"(",
"\"\"",
",",
"\"Everyone\"",
")",
"except",
"Exception",
":",
"# Todo fails on non english langauge systesm ... FU WINDOWS",
"# Just allow permission for the current user then...",
"everyone",
",",
"domain",
",",
"type",
"=",
"win32security",
".",
"LookupAccountName",
"(",
"\"\"",
",",
"win32api",
".",
"GetUserName",
"(",
")",
")",
"# ~ user, domain, type = win32security.LookupAccountName (\"\", win32api.GetUserName())",
"#~ userx, domain, type = win32security.LookupAccountName (\"\", \"User\")",
"#~ usery, domain, type = win32security.LookupAccountName (\"\", \"User Y\")",
"sd",
"=",
"win32security",
".",
"GetFileSecurity",
"(",
"filename",
",",
"win32security",
".",
"DACL_SECURITY_INFORMATION",
")",
"# instead of dacl = win32security.ACL()",
"dacl",
"=",
"sd",
".",
"GetSecurityDescriptorDacl",
"(",
")",
"#~ dacl.AddAccessAllowedAce(win32security.ACL_REVISION, con.FILE_GENERIC_READ | con.FILE_GENERIC_WRITE, everyone)",
"#~ dacl.AddAccessAllowedAce(win32security.ACL_REVISION, con.FILE_ALL_ACCESS, user)",
"dacl",
".",
"AddAccessAllowedAce",
"(",
"win32security",
".",
"ACL_REVISION",
",",
"con",
".",
"FILE_ALL_ACCESS",
",",
"everyone",
")",
"sd",
".",
"SetSecurityDescriptorDacl",
"(",
"1",
",",
"dacl",
",",
"0",
")",
"# may not be necessary",
"win32security",
".",
"SetFileSecurity",
"(",
"filename",
",",
"win32security",
".",
"DACL_SECURITY_INFORMATION",
",",
"sd",
")"
]
At least on Windows 7, if a file is created on an Admin account,
other users will not be given execute or full control.
However, if a user creates the file himself it will work...
So just always change permissions after creating a file on Windows
Change the permissions for Allusers of the application
The Everyone Group
Full access
http://timgolden.me.uk/python/win32_how_do_i/add-security-to-a-file.html | [
"At",
"least",
"on",
"windows",
"7",
"if",
"a",
"file",
"is",
"created",
"on",
"an",
"Admin",
"account",
"Other",
"users",
"will",
"not",
"be",
"given",
"execute",
"or",
"full",
"control",
".",
"However",
"if",
"a",
"user",
"creates",
"the",
"file",
"himself",
"it",
"will",
"work",
"...",
"So",
"just",
"always",
"change",
"permissions",
"after",
"creating",
"a",
"file",
"on",
"windows"
]
| python | train | 44.136364 |
ioos/compliance-checker | compliance_checker/cfutil.py | https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cfutil.py#L501-L523 | def get_longitude_variables(nc):
'''
Returns a list of all variables matching definitions for longitude
    :param netCDF4.Dataset nc: an open netCDF dataset object
'''
longitude_variables = []
# standard_name takes precedence
for variable in nc.get_variables_by_attributes(standard_name="longitude"):
longitude_variables.append(variable.name)
# Then axis
for variable in nc.get_variables_by_attributes(axis='X'):
if variable.name not in longitude_variables:
longitude_variables.append(variable.name)
check_fn = partial(attr_membership, value_set=VALID_LON_UNITS,
modifier_fn=lambda s: s.lower())
for variable in nc.get_variables_by_attributes(units=check_fn):
if variable.name not in longitude_variables:
longitude_variables.append(variable.name)
return longitude_variables | [
"def",
"get_longitude_variables",
"(",
"nc",
")",
":",
"longitude_variables",
"=",
"[",
"]",
"# standard_name takes precedence",
"for",
"variable",
"in",
"nc",
".",
"get_variables_by_attributes",
"(",
"standard_name",
"=",
"\"longitude\"",
")",
":",
"longitude_variables",
".",
"append",
"(",
"variable",
".",
"name",
")",
"# Then axis",
"for",
"variable",
"in",
"nc",
".",
"get_variables_by_attributes",
"(",
"axis",
"=",
"'X'",
")",
":",
"if",
"variable",
".",
"name",
"not",
"in",
"longitude_variables",
":",
"longitude_variables",
".",
"append",
"(",
"variable",
".",
"name",
")",
"check_fn",
"=",
"partial",
"(",
"attr_membership",
",",
"value_set",
"=",
"VALID_LON_UNITS",
",",
"modifier_fn",
"=",
"lambda",
"s",
":",
"s",
".",
"lower",
"(",
")",
")",
"for",
"variable",
"in",
"nc",
".",
"get_variables_by_attributes",
"(",
"units",
"=",
"check_fn",
")",
":",
"if",
"variable",
".",
"name",
"not",
"in",
"longitude_variables",
":",
"longitude_variables",
".",
"append",
"(",
"variable",
".",
"name",
")",
"return",
"longitude_variables"
]
| Returns a list of all variables matching definitions for longitude
:param netCDF4.Dataset nc: an open netCDF dataset object
"Returns",
"a",
"list",
"of",
"all",
"variables",
"matching",
"definitions",
"for",
"longitude"
]
| python | train | 37.869565 |
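A hedged usage sketch for the precedence lookup above (standard_name first, then axis, then units); get_variables_by_attributes is the netCDF4 API the checker relies on, while the file and variable names are hypothetical:

from netCDF4 import Dataset

nc = Dataset("example.nc")           # hypothetical file
lons = get_longitude_variables(nc)   # e.g. ['lon'] when lon has standard_name "longitude",
                                     # axis "X", or units such as "degrees_east"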
gwastro/pycbc-glue | pycbc_glue/LDBDClient.py | https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/LDBDClient.py#L310-L327 | def ping(self):
"""
Ping the LDBD Server and return any message received back as a string.
@return: message received (may be empty) from LDBD Server as a string
"""
msg = "PING\0"
self.sfile.write(msg)
ret, output = self.__response__()
reply = str(output[0])
if ret:
msg = "Error pinging server %d:%s" % (ret, reply)
raise LDBDClientException, msg
return reply | [
"def",
"ping",
"(",
"self",
")",
":",
"msg",
"=",
"\"PING\\0\"",
"self",
".",
"sfile",
".",
"write",
"(",
"msg",
")",
"ret",
",",
"output",
"=",
"self",
".",
"__response__",
"(",
")",
"reply",
"=",
"str",
"(",
"output",
"[",
"0",
"]",
")",
"if",
"ret",
":",
"msg",
"=",
"\"Error pinging server %d:%s\"",
"%",
"(",
"ret",
",",
"reply",
")",
"raise",
"LDBDClientException",
",",
"msg",
"return",
"reply"
]
| Ping the LDBD Server and return any message received back as a string.
@return: message received (may be empty) from LDBD Server as a string | [
"Ping",
"the",
"LDBD",
"Server",
"and",
"return",
"any",
"message",
"received",
"back",
"as",
"a",
"string",
"."
]
| python | train | 22.222222 |
mbedmicro/pyOCD | pyocd/target/pack/cmsis_pack.py | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/target/pack/cmsis_pack.py#L489-L504 | def default_reset_type(self):
"""! @brief One of the Target.ResetType enums.
@todo Support multiple cores.
"""
try:
resetSequence = self._info.debugs[0].attrib['defaultResetSequence']
if resetSequence == 'ResetHardware':
return Target.ResetType.HW
elif resetSequence == 'ResetSystem':
return Target.ResetType.SW_SYSRESETREQ
elif resetSequence == 'ResetProcessor':
return Target.ResetType.SW_VECTRESET
else:
return Target.ResetType.SW
except (KeyError, IndexError):
return Target.ResetType.SW | [
"def",
"default_reset_type",
"(",
"self",
")",
":",
"try",
":",
"resetSequence",
"=",
"self",
".",
"_info",
".",
"debugs",
"[",
"0",
"]",
".",
"attrib",
"[",
"'defaultResetSequence'",
"]",
"if",
"resetSequence",
"==",
"'ResetHardware'",
":",
"return",
"Target",
".",
"ResetType",
".",
"HW",
"elif",
"resetSequence",
"==",
"'ResetSystem'",
":",
"return",
"Target",
".",
"ResetType",
".",
"SW_SYSRESETREQ",
"elif",
"resetSequence",
"==",
"'ResetProcessor'",
":",
"return",
"Target",
".",
"ResetType",
".",
"SW_VECTRESET",
"else",
":",
"return",
"Target",
".",
"ResetType",
".",
"SW",
"except",
"(",
"KeyError",
",",
"IndexError",
")",
":",
"return",
"Target",
".",
"ResetType",
".",
"SW"
]
| ! @brief One of the Target.ResetType enums.
@todo Support multiple cores. | [
"!"
]
| python | train | 40.75 |
geertj/gruvi | lib/gruvi/ssl.py | https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/ssl.py#L494-L500 | def close(self):
"""Cleanly shut down the SSL protocol and close the transport."""
if self._closing or self._handle.closed:
return
self._closing = True
self._write_backlog.append([b'', False])
self._process_write_backlog() | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_closing",
"or",
"self",
".",
"_handle",
".",
"closed",
":",
"return",
"self",
".",
"_closing",
"=",
"True",
"self",
".",
"_write_backlog",
".",
"append",
"(",
"[",
"b''",
",",
"False",
"]",
")",
"self",
".",
"_process_write_backlog",
"(",
")"
]
| Cleanly shut down the SSL protocol and close the transport. | [
"Cleanly",
"shut",
"down",
"the",
"SSL",
"protocol",
"and",
"close",
"the",
"transport",
"."
]
| python | train | 38.285714 |
juju/charm-helpers | charmhelpers/contrib/openstack/templating.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/templating.py#L330-L334 | def write_all(self):
"""
Write out all registered config files.
"""
[self.write(k) for k in six.iterkeys(self.templates)] | [
"def",
"write_all",
"(",
"self",
")",
":",
"[",
"self",
".",
"write",
"(",
"k",
")",
"for",
"k",
"in",
"six",
".",
"iterkeys",
"(",
"self",
".",
"templates",
")",
"]"
]
| Write out all registered config files. | [
"Write",
"out",
"all",
"registered",
"config",
"files",
"."
]
| python | train | 29.8 |
openstack/python-scciclient | scciclient/irmc/viom/client.py | https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/viom/client.py#L312-L323 | def set_lan_port(self, port_id, mac=None):
"""Set LAN port information to configuration.
:param port_id: Physical port ID.
:param mac: virtual MAC address if virtualization is necessary.
"""
port_handler = _parse_physical_port_id(port_id)
port = self._find_port(port_handler)
if port:
port_handler.set_lan_port(port, mac)
else:
self._add_port(port_handler, port_handler.create_lan_port(mac)) | [
"def",
"set_lan_port",
"(",
"self",
",",
"port_id",
",",
"mac",
"=",
"None",
")",
":",
"port_handler",
"=",
"_parse_physical_port_id",
"(",
"port_id",
")",
"port",
"=",
"self",
".",
"_find_port",
"(",
"port_handler",
")",
"if",
"port",
":",
"port_handler",
".",
"set_lan_port",
"(",
"port",
",",
"mac",
")",
"else",
":",
"self",
".",
"_add_port",
"(",
"port_handler",
",",
"port_handler",
".",
"create_lan_port",
"(",
"mac",
")",
")"
]
| Set LAN port information to configuration.
:param port_id: Physical port ID.
:param mac: virtual MAC address if virtualization is necessary. | [
"Set",
"LAN",
"port",
"information",
"to",
"configuration",
"."
]
| python | train | 39.083333 |
saltstack/salt | salt/states/virt.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/virt.py#L51-L145 | def keys(name, basepath='/etc/pki', **kwargs):
'''
Manage libvirt keys.
name
The name variable used to track the execution
basepath
Defaults to ``/etc/pki``, this is the root location used for libvirt
keys on the hypervisor
The following parameters are optional:
country
The country that the certificate should use. Defaults to US.
.. versionadded:: 2018.3.0
state
The state that the certificate should use. Defaults to Utah.
.. versionadded:: 2018.3.0
locality
The locality that the certificate should use.
Defaults to Salt Lake City.
.. versionadded:: 2018.3.0
organization
The organization that the certificate should use.
Defaults to Salted.
.. versionadded:: 2018.3.0
expiration_days
The number of days that the certificate should be valid for.
Defaults to 365 days (1 year)
.. versionadded:: 2018.3.0
'''
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
# Grab all kwargs to make them available as pillar values
# rename them to something hopefully unique to avoid
# overriding anything existing
pillar_kwargs = {}
for key, value in six.iteritems(kwargs):
pillar_kwargs['ext_pillar_virt.{0}'.format(key)] = value
pillar = __salt__['pillar.ext']({'libvirt': '_'}, pillar_kwargs)
paths = {
'serverkey': os.path.join(basepath, 'libvirt',
'private', 'serverkey.pem'),
'servercert': os.path.join(basepath, 'libvirt',
'servercert.pem'),
'clientkey': os.path.join(basepath, 'libvirt',
'private', 'clientkey.pem'),
'clientcert': os.path.join(basepath, 'libvirt',
'clientcert.pem'),
'cacert': os.path.join(basepath, 'CA', 'cacert.pem')
}
for key in paths:
p_key = 'libvirt.{0}.pem'.format(key)
if p_key not in pillar:
continue
if not os.path.exists(os.path.dirname(paths[key])):
os.makedirs(os.path.dirname(paths[key]))
if os.path.isfile(paths[key]):
with salt.utils.files.fopen(paths[key], 'r') as fp_:
if salt.utils.stringutils.to_unicode(fp_.read()) != pillar[p_key]:
ret['changes'][key] = 'update'
else:
ret['changes'][key] = 'new'
if not ret['changes']:
ret['comment'] = 'All keys are correct'
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Libvirt keys are set to be updated'
ret['changes'] = {}
else:
for key in ret['changes']:
with salt.utils.files.fopen(paths[key], 'w+') as fp_:
fp_.write(
salt.utils.stringutils.to_str(
pillar['libvirt.{0}.pem'.format(key)]
)
)
ret['comment'] = 'Updated libvirt certs and keys'
return ret | [
"def",
"keys",
"(",
"name",
",",
"basepath",
"=",
"'/etc/pki'",
",",
"*",
"*",
"kwargs",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"True",
",",
"'comment'",
":",
"''",
"}",
"# Grab all kwargs to make them available as pillar values",
"# rename them to something hopefully unique to avoid",
"# overriding anything existing",
"pillar_kwargs",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"kwargs",
")",
":",
"pillar_kwargs",
"[",
"'ext_pillar_virt.{0}'",
".",
"format",
"(",
"key",
")",
"]",
"=",
"value",
"pillar",
"=",
"__salt__",
"[",
"'pillar.ext'",
"]",
"(",
"{",
"'libvirt'",
":",
"'_'",
"}",
",",
"pillar_kwargs",
")",
"paths",
"=",
"{",
"'serverkey'",
":",
"os",
".",
"path",
".",
"join",
"(",
"basepath",
",",
"'libvirt'",
",",
"'private'",
",",
"'serverkey.pem'",
")",
",",
"'servercert'",
":",
"os",
".",
"path",
".",
"join",
"(",
"basepath",
",",
"'libvirt'",
",",
"'servercert.pem'",
")",
",",
"'clientkey'",
":",
"os",
".",
"path",
".",
"join",
"(",
"basepath",
",",
"'libvirt'",
",",
"'private'",
",",
"'clientkey.pem'",
")",
",",
"'clientcert'",
":",
"os",
".",
"path",
".",
"join",
"(",
"basepath",
",",
"'libvirt'",
",",
"'clientcert.pem'",
")",
",",
"'cacert'",
":",
"os",
".",
"path",
".",
"join",
"(",
"basepath",
",",
"'CA'",
",",
"'cacert.pem'",
")",
"}",
"for",
"key",
"in",
"paths",
":",
"p_key",
"=",
"'libvirt.{0}.pem'",
".",
"format",
"(",
"key",
")",
"if",
"p_key",
"not",
"in",
"pillar",
":",
"continue",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"paths",
"[",
"key",
"]",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"paths",
"[",
"key",
"]",
")",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"paths",
"[",
"key",
"]",
")",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"paths",
"[",
"key",
"]",
",",
"'r'",
")",
"as",
"fp_",
":",
"if",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_unicode",
"(",
"fp_",
".",
"read",
"(",
")",
")",
"!=",
"pillar",
"[",
"p_key",
"]",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"key",
"]",
"=",
"'update'",
"else",
":",
"ret",
"[",
"'changes'",
"]",
"[",
"key",
"]",
"=",
"'new'",
"if",
"not",
"ret",
"[",
"'changes'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'All keys are correct'",
"elif",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'result'",
"]",
"=",
"None",
"ret",
"[",
"'comment'",
"]",
"=",
"'Libvirt keys are set to be updated'",
"ret",
"[",
"'changes'",
"]",
"=",
"{",
"}",
"else",
":",
"for",
"key",
"in",
"ret",
"[",
"'changes'",
"]",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"paths",
"[",
"key",
"]",
",",
"'w+'",
")",
"as",
"fp_",
":",
"fp_",
".",
"write",
"(",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_str",
"(",
"pillar",
"[",
"'libvirt.{0}.pem'",
".",
"format",
"(",
"key",
")",
"]",
")",
")",
"ret",
"[",
"'comment'",
"]",
"=",
"'Updated libvirt certs and keys'",
"return",
"ret"
]
| Manage libvirt keys.
name
The name variable used to track the execution
basepath
Defaults to ``/etc/pki``, this is the root location used for libvirt
keys on the hypervisor
The following parameters are optional:
country
The country that the certificate should use. Defaults to US.
.. versionadded:: 2018.3.0
state
The state that the certificate should use. Defaults to Utah.
.. versionadded:: 2018.3.0
locality
The locality that the certificate should use.
Defaults to Salt Lake City.
.. versionadded:: 2018.3.0
organization
The organization that the certificate should use.
Defaults to Salted.
.. versionadded:: 2018.3.0
expiration_days
The number of days that the certificate should be valid for.
Defaults to 365 days (1 year)
.. versionadded:: 2018.3.0 | [
"Manage",
"libvirt",
"keys",
"."
]
| python | train | 31.968421 |
darkfeline/mir.anidb | mir/anidb/anime.py | https://github.com/darkfeline/mir.anidb/blob/a0d25908f85fb1ff4bc595954bfc3f223f1b5acc/mir/anidb/anime.py#L87-L96 | def get_episode_title(episode: Episode) -> int:
"""Get the episode title.
Japanese title is prioritized.
"""
for title in episode.titles:
if title.lang == 'ja':
return title.title
else:
return episode.titles[0].title | [
"def",
"get_episode_title",
"(",
"episode",
":",
"Episode",
")",
"->",
"int",
":",
"for",
"title",
"in",
"episode",
".",
"titles",
":",
"if",
"title",
".",
"lang",
"==",
"'ja'",
":",
"return",
"title",
".",
"title",
"else",
":",
"return",
"episode",
".",
"titles",
"[",
"0",
"]",
".",
"title"
]
| Get the episode title.
Japanese title is prioritized. | [
"Get",
"the",
"episode",
"title",
"."
]
| python | train | 25.6 |
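A usage sketch for get_episode_title with stand-in types; the real Episode and Title classes live elsewhere in mir.anidb, so these namedtuples only mimic the two attributes the function reads:

from collections import namedtuple

Title = namedtuple('Title', 'lang title')
Episode = namedtuple('Episode', 'titles')

ep = Episode(titles=(Title('en', 'The Beginning'), Title('ja', 'Hajimari')))
print(get_episode_title(ep))  # 'Hajimari': the Japanese title wins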
CivicSpleen/ambry | ambry/bundle/bundle.py | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L2024-L2129 | def schema(self, sources=None, tables=None, clean=False, force=False, use_pipeline=False):
"""
Generate destination schemas.
:param sources: If specified, build only destination tables for these sources
:param tables: If specified, build only these tables
:param clean: Delete tables and partitions first
        :param force: Populate tables even if the table isn't empty
        :param use_pipeline: If True, use the build pipeline to determine columns. If False, use the columns of the source tables.
:return: True on success.
"""
from itertools import groupby
from operator import attrgetter
from ambry.etl import Collect, Head
from ambry.orm.exc import NotFoundError
self.dstate = self.STATES.BUILDING
self.commit() # Workaround for https://github.com/CivicKnowledge/ambry/issues/171
self.log('---- Schema ----')
resolved_sources = self._resolve_sources(sources, tables, predicate=lambda s: s.is_processable)
if clean:
self.dataset.delete_tables_partitions()
self.commit()
# Group the sources by the destination table name
keyfunc = attrgetter('dest_table')
for t, table_sources in groupby(sorted(resolved_sources, key=keyfunc), keyfunc):
if use_pipeline:
for source in table_sources:
pl = self.pipeline(source)
pl.cast = [ambry.etl.CastSourceColumns]
pl.select_partition = []
pl.write = [Head, Collect]
pl.final = []
self.log_pipeline(pl)
pl.run()
pl.phase = 'build_schema'
self.log_pipeline(pl)
for h, c in zip(pl.write[Collect].headers, pl.write[Collect].rows[1]):
c = t.add_column(name=h, datatype=type(c).__name__ if c is not None else 'str',
update_existing=True)
self.log("Populated destination table '{}' from pipeline '{}'"
.format(t.name, pl.name))
else:
# Get all of the header names, for each source, associating the header position in the table
                # with the header, then sort on the position. This will produce a stream of header names
# that may have duplicates, but which is generally in the order the headers appear in the
# sources. The duplicates are properly handled when we add the columns in add_column()
self.commit()
def source_cols(source):
if source.is_partition and not source.source_table_exists:
return enumerate(source.partition.table.columns)
else:
return enumerate(source.source_table.columns)
columns = sorted(set([(i, col.dest_header, col.datatype, col.description, col.has_codes)
for source in table_sources for i, col in source_cols(source)]))
initial_count = len(t.columns)
for pos, name, datatype, desc, has_codes in columns:
kwds = dict(
name=name,
datatype=datatype,
description=desc,
update_existing=True
)
try:
extant = t.column(name)
except NotFoundError:
extant = None
if extant is None or not extant.description:
kwds['description'] = desc
c = t.add_column(**kwds)
final_count = len(t.columns)
if final_count > initial_count:
diff = final_count - initial_count
self.log("Populated destination table '{}' from source table '{}' with {} columns"
.format(t.name, source.source_table.name, diff))
self.commit()
return True | [
"def",
"schema",
"(",
"self",
",",
"sources",
"=",
"None",
",",
"tables",
"=",
"None",
",",
"clean",
"=",
"False",
",",
"force",
"=",
"False",
",",
"use_pipeline",
"=",
"False",
")",
":",
"from",
"itertools",
"import",
"groupby",
"from",
"operator",
"import",
"attrgetter",
"from",
"ambry",
".",
"etl",
"import",
"Collect",
",",
"Head",
"from",
"ambry",
".",
"orm",
".",
"exc",
"import",
"NotFoundError",
"self",
".",
"dstate",
"=",
"self",
".",
"STATES",
".",
"BUILDING",
"self",
".",
"commit",
"(",
")",
"# Workaround for https://github.com/CivicKnowledge/ambry/issues/171",
"self",
".",
"log",
"(",
"'---- Schema ----'",
")",
"resolved_sources",
"=",
"self",
".",
"_resolve_sources",
"(",
"sources",
",",
"tables",
",",
"predicate",
"=",
"lambda",
"s",
":",
"s",
".",
"is_processable",
")",
"if",
"clean",
":",
"self",
".",
"dataset",
".",
"delete_tables_partitions",
"(",
")",
"self",
".",
"commit",
"(",
")",
"# Group the sources by the destination table name",
"keyfunc",
"=",
"attrgetter",
"(",
"'dest_table'",
")",
"for",
"t",
",",
"table_sources",
"in",
"groupby",
"(",
"sorted",
"(",
"resolved_sources",
",",
"key",
"=",
"keyfunc",
")",
",",
"keyfunc",
")",
":",
"if",
"use_pipeline",
":",
"for",
"source",
"in",
"table_sources",
":",
"pl",
"=",
"self",
".",
"pipeline",
"(",
"source",
")",
"pl",
".",
"cast",
"=",
"[",
"ambry",
".",
"etl",
".",
"CastSourceColumns",
"]",
"pl",
".",
"select_partition",
"=",
"[",
"]",
"pl",
".",
"write",
"=",
"[",
"Head",
",",
"Collect",
"]",
"pl",
".",
"final",
"=",
"[",
"]",
"self",
".",
"log_pipeline",
"(",
"pl",
")",
"pl",
".",
"run",
"(",
")",
"pl",
".",
"phase",
"=",
"'build_schema'",
"self",
".",
"log_pipeline",
"(",
"pl",
")",
"for",
"h",
",",
"c",
"in",
"zip",
"(",
"pl",
".",
"write",
"[",
"Collect",
"]",
".",
"headers",
",",
"pl",
".",
"write",
"[",
"Collect",
"]",
".",
"rows",
"[",
"1",
"]",
")",
":",
"c",
"=",
"t",
".",
"add_column",
"(",
"name",
"=",
"h",
",",
"datatype",
"=",
"type",
"(",
"c",
")",
".",
"__name__",
"if",
"c",
"is",
"not",
"None",
"else",
"'str'",
",",
"update_existing",
"=",
"True",
")",
"self",
".",
"log",
"(",
"\"Populated destination table '{}' from pipeline '{}'\"",
".",
"format",
"(",
"t",
".",
"name",
",",
"pl",
".",
"name",
")",
")",
"else",
":",
"# Get all of the header names, for each source, associating the header position in the table",
"# with the header, then sort on the postition. This will produce a stream of header names",
"# that may have duplicates, but which is generally in the order the headers appear in the",
"# sources. The duplicates are properly handled when we add the columns in add_column()",
"self",
".",
"commit",
"(",
")",
"def",
"source_cols",
"(",
"source",
")",
":",
"if",
"source",
".",
"is_partition",
"and",
"not",
"source",
".",
"source_table_exists",
":",
"return",
"enumerate",
"(",
"source",
".",
"partition",
".",
"table",
".",
"columns",
")",
"else",
":",
"return",
"enumerate",
"(",
"source",
".",
"source_table",
".",
"columns",
")",
"columns",
"=",
"sorted",
"(",
"set",
"(",
"[",
"(",
"i",
",",
"col",
".",
"dest_header",
",",
"col",
".",
"datatype",
",",
"col",
".",
"description",
",",
"col",
".",
"has_codes",
")",
"for",
"source",
"in",
"table_sources",
"for",
"i",
",",
"col",
"in",
"source_cols",
"(",
"source",
")",
"]",
")",
")",
"initial_count",
"=",
"len",
"(",
"t",
".",
"columns",
")",
"for",
"pos",
",",
"name",
",",
"datatype",
",",
"desc",
",",
"has_codes",
"in",
"columns",
":",
"kwds",
"=",
"dict",
"(",
"name",
"=",
"name",
",",
"datatype",
"=",
"datatype",
",",
"description",
"=",
"desc",
",",
"update_existing",
"=",
"True",
")",
"try",
":",
"extant",
"=",
"t",
".",
"column",
"(",
"name",
")",
"except",
"NotFoundError",
":",
"extant",
"=",
"None",
"if",
"extant",
"is",
"None",
"or",
"not",
"extant",
".",
"description",
":",
"kwds",
"[",
"'description'",
"]",
"=",
"desc",
"c",
"=",
"t",
".",
"add_column",
"(",
"*",
"*",
"kwds",
")",
"final_count",
"=",
"len",
"(",
"t",
".",
"columns",
")",
"if",
"final_count",
">",
"initial_count",
":",
"diff",
"=",
"final_count",
"-",
"initial_count",
"self",
".",
"log",
"(",
"\"Populated destination table '{}' from source table '{}' with {} columns\"",
".",
"format",
"(",
"t",
".",
"name",
",",
"source",
".",
"source_table",
".",
"name",
",",
"diff",
")",
")",
"self",
".",
"commit",
"(",
")",
"return",
"True"
]
| Generate destination schemas.
:param sources: If specified, build only destination tables for these sources
:param tables: If specified, build only these tables
:param clean: Delete tables and partitions first
:param force: Populate tables even if the table isn't empty
:param use_pipeline: If True, use the build pipeline to determine columns. If False, use the columns of the source tables.
:return: True on success. | [
"Generate",
"destination",
"schemas",
"."
]
| python | train | 38.245283 |
contentful/contentful-management.py | contentful_management/entry.py | https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/entry.py#L54-L64 | def update(self, attributes=None):
"""
Updates the entry with attributes.
"""
if attributes is None:
attributes = {}
attributes['content_type_id'] = self.sys['content_type'].id
return super(Entry, self).update(attributes) | [
"def",
"update",
"(",
"self",
",",
"attributes",
"=",
"None",
")",
":",
"if",
"attributes",
"is",
"None",
":",
"attributes",
"=",
"{",
"}",
"attributes",
"[",
"'content_type_id'",
"]",
"=",
"self",
".",
"sys",
"[",
"'content_type'",
"]",
".",
"id",
"return",
"super",
"(",
"Entry",
",",
"self",
")",
".",
"update",
"(",
"attributes",
")"
]
| Updates the entry with attributes. | [
"Updates",
"the",
"entry",
"with",
"attributes",
"."
]
| python | train | 24.909091 |
ibis-project/ibis | ibis/expr/api.py | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/api.py#L294-L308 | def time(value):
"""
Returns a time literal if value is likely coercible to a time
Parameters
----------
value : time value as string
Returns
    -------
result : TimeScalar
"""
if isinstance(value, str):
value = to_time(value)
return literal(value, type=dt.time) | [
"def",
"time",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"value",
"=",
"to_time",
"(",
"value",
")",
"return",
"literal",
"(",
"value",
",",
"type",
"=",
"dt",
".",
"time",
")"
]
| Returns a time literal if value is likely coercible to a time
Parameters
----------
value : time value as string
Returns
-------
result : TimeScalar | [
"Returns",
"a",
"time",
"literal",
"if",
"value",
"is",
"likely",
"coercible",
"to",
"a",
"time"
]
| python | train | 20 |
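A hedged usage sketch; the exact string formats accepted depend on to_time, but "HH:MM:SS" is the common case:

import datetime

lit = time("10:15:00")              # string is parsed by to_time, then wrapped as a literal
lit2 = time(datetime.time(10, 15))  # non-strings go straight to literal(value, type=dt.time)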
wal-e/wal-e | wal_e/log_help.py | https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/log_help.py#L145-L158 | def _fmt_structured(d):
"""Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2'
Output is lexically sorted, *except* the time and pid always
come first, to assist with human scanning of the data.
"""
timeEntry = datetime.datetime.utcnow().strftime(
"time=%Y-%m-%dT%H:%M:%S.%f-00")
pidEntry = "pid=" + str(os.getpid())
rest = sorted('='.join([str(k), str(v)])
for (k, v) in list(d.items()))
return ' '.join([timeEntry, pidEntry] + rest) | [
"def",
"_fmt_structured",
"(",
"d",
")",
":",
"timeEntry",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"\"time=%Y-%m-%dT%H:%M:%S.%f-00\"",
")",
"pidEntry",
"=",
"\"pid=\"",
"+",
"str",
"(",
"os",
".",
"getpid",
"(",
")",
")",
"rest",
"=",
"sorted",
"(",
"'='",
".",
"join",
"(",
"[",
"str",
"(",
"k",
")",
",",
"str",
"(",
"v",
")",
"]",
")",
"for",
"(",
"k",
",",
"v",
")",
"in",
"list",
"(",
"d",
".",
"items",
"(",
")",
")",
")",
"return",
"' '",
".",
"join",
"(",
"[",
"timeEntry",
",",
"pidEntry",
"]",
"+",
"rest",
")"
]
| Formats '{k1:v1, k2:v2}' => 'time=... pid=... k1=v1 k2=v2'
Output is lexically sorted, *except* the time and pid always
come first, to assist with human scanning of the data. | [
"Formats",
"{",
"k1",
":",
"v1",
"k2",
":",
"v2",
"}",
"=",
">",
"time",
"=",
"...",
"pid",
"=",
"...",
"k1",
"=",
"v1",
"k2",
"=",
"v2"
]
| python | train | 37.785714 |
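What the formatter yields: the time and pid entries always lead, and the remaining key=value pairs are lexically sorted. A self-contained re-implementation for illustration (the dict contents are made up):

import datetime
import os

def fmt_structured(d):
    time_entry = datetime.datetime.utcnow().strftime("time=%Y-%m-%dT%H:%M:%S.%f-00")
    pid_entry = "pid=" + str(os.getpid())
    rest = sorted("=".join([str(k), str(v)]) for k, v in d.items())
    return " ".join([time_entry, pid_entry] + rest)

print(fmt_structured({"state": "begin", "seg": "00000001"}))
# e.g. time=2024-01-01T12:00:00.000000-00 pid=4242 seg=00000001 state=begin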
dmwm/DBS | Server/Python/src/dbs/dao/Oracle/PrimaryDataset/List.py | https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/dao/Oracle/PrimaryDataset/List.py#L31-L60 | def execute(self, conn, primary_ds_name="", primary_ds_type="", transaction=False):
"""
Lists all primary datasets if pattern is not provided.
"""
sql = self.sql
binds = {}
#import pdb
#pdb.set_trace()
if primary_ds_name and primary_ds_type in ('', None, '%'):
op = ("=", "like")["%" in primary_ds_name]
sql += "WHERE P.PRIMARY_DS_NAME %s :primary_ds_name" % op
binds.update(primary_ds_name=primary_ds_name)
elif primary_ds_type and primary_ds_name in ('', None, '%'):
op = ("=", "like")["%" in primary_ds_type]
sql += "WHERE PT.PRIMARY_DS_TYPE %s :primary_ds_type" % op
binds.update(primary_ds_type=primary_ds_type)
elif primary_ds_name and primary_ds_type:
op = ("=", "like")["%" in primary_ds_name]
op1 = ("=", "like")["%" in primary_ds_type]
sql += "WHERE P.PRIMARY_DS_NAME %s :primary_ds_name and PT.PRIMARY_DS_TYPE %s :primary_ds_type"\
%(op, op1)
binds.update(primary_ds_name=primary_ds_name)
binds.update(primary_ds_type=primary_ds_type)
else:
pass
cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
result = []
for c in cursors:
result.extend(self.formatCursor(c, size=100))
return result | [
"def",
"execute",
"(",
"self",
",",
"conn",
",",
"primary_ds_name",
"=",
"\"\"",
",",
"primary_ds_type",
"=",
"\"\"",
",",
"transaction",
"=",
"False",
")",
":",
"sql",
"=",
"self",
".",
"sql",
"binds",
"=",
"{",
"}",
"#import pdb",
"#pdb.set_trace()",
"if",
"primary_ds_name",
"and",
"primary_ds_type",
"in",
"(",
"''",
",",
"None",
",",
"'%'",
")",
":",
"op",
"=",
"(",
"\"=\"",
",",
"\"like\"",
")",
"[",
"\"%\"",
"in",
"primary_ds_name",
"]",
"sql",
"+=",
"\"WHERE P.PRIMARY_DS_NAME %s :primary_ds_name\"",
"%",
"op",
"binds",
".",
"update",
"(",
"primary_ds_name",
"=",
"primary_ds_name",
")",
"elif",
"primary_ds_type",
"and",
"primary_ds_name",
"in",
"(",
"''",
",",
"None",
",",
"'%'",
")",
":",
"op",
"=",
"(",
"\"=\"",
",",
"\"like\"",
")",
"[",
"\"%\"",
"in",
"primary_ds_type",
"]",
"sql",
"+=",
"\"WHERE PT.PRIMARY_DS_TYPE %s :primary_ds_type\"",
"%",
"op",
"binds",
".",
"update",
"(",
"primary_ds_type",
"=",
"primary_ds_type",
")",
"elif",
"primary_ds_name",
"and",
"primary_ds_type",
":",
"op",
"=",
"(",
"\"=\"",
",",
"\"like\"",
")",
"[",
"\"%\"",
"in",
"primary_ds_name",
"]",
"op1",
"=",
"(",
"\"=\"",
",",
"\"like\"",
")",
"[",
"\"%\"",
"in",
"primary_ds_type",
"]",
"sql",
"+=",
"\"WHERE P.PRIMARY_DS_NAME %s :primary_ds_name and PT.PRIMARY_DS_TYPE %s :primary_ds_type\"",
"%",
"(",
"op",
",",
"op1",
")",
"binds",
".",
"update",
"(",
"primary_ds_name",
"=",
"primary_ds_name",
")",
"binds",
".",
"update",
"(",
"primary_ds_type",
"=",
"primary_ds_type",
")",
"else",
":",
"pass",
"cursors",
"=",
"self",
".",
"dbi",
".",
"processData",
"(",
"sql",
",",
"binds",
",",
"conn",
",",
"transaction",
",",
"returnCursor",
"=",
"True",
")",
"result",
"=",
"[",
"]",
"for",
"c",
"in",
"cursors",
":",
"result",
".",
"extend",
"(",
"self",
".",
"formatCursor",
"(",
"c",
",",
"size",
"=",
"100",
")",
")",
"return",
"result"
]
| Lists all primary datasets if pattern is not provided. | [
"Lists",
"all",
"primary",
"datasets",
"if",
"pattern",
"is",
"not",
"provided",
"."
]
| python | train | 46.133333 |
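The op = ("=", "like")["%" in pattern] idiom above indexes a two-tuple with a boolean (False is 0, True is 1) to choose the SQL comparison operator. A tiny self-contained demonstration:

def pick_op(pattern):
    # bool indexes the tuple: no wildcard -> "=", wildcard -> "like"
    return ("=", "like")["%" in pattern]

assert pick_op("minbias") == "="
assert pick_op("min%") == "like"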
marrabld/planarradpy | libplanarradpy/planrad.py | https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L868-L886 | def batch_parameters(self, saa, sza, p, x, y, g, s, z):
"""Takes lists for parameters and saves them as class properties
:param saa: <list> Sun Azimuth Angle (deg)
:param sza: <list> Sun Zenith Angle (deg)
:param p: <list> Phytoplankton linear scalling factor
:param x: <list> Scattering scaling factor
:param y: <list> Scattering slope factor
:param g: <list> CDOM absorption scaling factor
:param s: <list> CDOM absorption slope factor
:param z: <list> depth (m)"""
self.saa_list = saa
self.sza_list = sza
self.p_list = p
self.x_list = x
self.y_list = y
self.g_list = g
self.s_list = s
self.z_list = z | [
"def",
"batch_parameters",
"(",
"self",
",",
"saa",
",",
"sza",
",",
"p",
",",
"x",
",",
"y",
",",
"g",
",",
"s",
",",
"z",
")",
":",
"self",
".",
"saa_list",
"=",
"saa",
"self",
".",
"sza_list",
"=",
"sza",
"self",
".",
"p_list",
"=",
"p",
"self",
".",
"x_list",
"=",
"x",
"self",
".",
"y_list",
"=",
"y",
"self",
".",
"g_list",
"=",
"g",
"self",
".",
"s_list",
"=",
"s",
"self",
".",
"z_list",
"=",
"z"
]
| Takes lists for parameters and saves them as class properties
:param saa: <list> Sun Azimuth Angle (deg)
:param sza: <list> Sun Zenith Angle (deg)
:param p: <list> Phytoplankton linear scalling factor
:param x: <list> Scattering scaling factor
:param y: <list> Scattering slope factor
:param g: <list> CDOM absorption scaling factor
:param s: <list> CDOM absorption slope factor
:param z: <list> depth (m) | [
"Takes",
"lists",
"for",
"parameters",
"and",
"saves",
"them",
"as",
"class",
"properties"
]
| python | test | 38 |
acutesoftware/AIKIF | scripts/examples/game_of_life_console.py | https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/game_of_life_console.py#L65-L76 | def set_random_starting_grid(lfe):
"""
generate a random grid for game of life using a
set of patterns (just to make it interesting)
"""
cls_patterns = mod_grid.GameOfLifePatterns(25)
patterns = cls_patterns.get_patterns()
for pattern in patterns:
lfe.set_tile(pattern[0], pattern[1], 1) | [
"def",
"set_random_starting_grid",
"(",
"lfe",
")",
":",
"cls_patterns",
"=",
"mod_grid",
".",
"GameOfLifePatterns",
"(",
"25",
")",
"print",
"(",
"cls_patterns",
")",
"exit",
"(",
"0",
")",
"patterns",
"=",
"cls_patterns",
".",
"get_patterns",
"(",
")",
"for",
"pattern",
"in",
"patterns",
":",
"lfe",
".",
"set_tile",
"(",
"pattern",
"[",
"0",
"]",
",",
"pattern",
"[",
"1",
"]",
",",
"1",
")"
]
| generate a random grid for game of life using a
set of patterns (just to make it interesting) | [
"generate",
"a",
"random",
"grid",
"for",
"game",
"of",
"life",
"using",
"a",
"set",
"of",
"patterns",
"(",
"just",
"to",
"make",
"it",
"interesting",
")"
]
| python | train | 29.5 |
pyviz/holoviews | holoviews/core/element.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/element.py#L29-L61 | def hist(self, dimension=None, num_bins=20, bin_range=None,
adjoin=True, **kwargs):
"""Computes and adjoins histogram along specified dimension(s).
Defaults to first value dimension if present otherwise falls
back to first key dimension.
Args:
dimension: Dimension(s) to compute histogram on
num_bins (int, optional): Number of bins
            bin_range (tuple, optional): Lower and upper bounds of bins
adjoin (bool, optional): Whether to adjoin histogram
Returns:
AdjointLayout of element and histogram or just the
histogram
"""
from ..operation import histogram
if not isinstance(dimension, list): dimension = [dimension]
hists = []
for d in dimension[::-1]:
hist = histogram(self, num_bins=num_bins, bin_range=bin_range,
dimension=d, **kwargs)
hists.append(hist)
if adjoin:
layout = self
for didx in range(len(dimension)):
layout = layout << hists[didx]
elif len(dimension) > 1:
layout = Layout(hists)
else:
layout = hists[0]
return layout | [
"def",
"hist",
"(",
"self",
",",
"dimension",
"=",
"None",
",",
"num_bins",
"=",
"20",
",",
"bin_range",
"=",
"None",
",",
"adjoin",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
".",
"operation",
"import",
"histogram",
"if",
"not",
"isinstance",
"(",
"dimension",
",",
"list",
")",
":",
"dimension",
"=",
"[",
"dimension",
"]",
"hists",
"=",
"[",
"]",
"for",
"d",
"in",
"dimension",
"[",
":",
":",
"-",
"1",
"]",
":",
"hist",
"=",
"histogram",
"(",
"self",
",",
"num_bins",
"=",
"num_bins",
",",
"bin_range",
"=",
"bin_range",
",",
"dimension",
"=",
"d",
",",
"*",
"*",
"kwargs",
")",
"hists",
".",
"append",
"(",
"hist",
")",
"if",
"adjoin",
":",
"layout",
"=",
"self",
"for",
"didx",
"in",
"range",
"(",
"len",
"(",
"dimension",
")",
")",
":",
"layout",
"=",
"layout",
"<<",
"hists",
"[",
"didx",
"]",
"elif",
"len",
"(",
"dimension",
")",
">",
"1",
":",
"layout",
"=",
"Layout",
"(",
"hists",
")",
"else",
":",
"layout",
"=",
"hists",
"[",
"0",
"]",
"return",
"layout"
]
| Computes and adjoins histogram along specified dimension(s).
Defaults to first value dimension if present otherwise falls
back to first key dimension.
Args:
dimension: Dimension(s) to compute histogram on
num_bins (int, optional): Number of bins
bin_range (tuple, optional): Lower and upper bounds of bins
adjoin (bool, optional): Whether to adjoin histogram
Returns:
AdjointLayout of element and histogram or just the
histogram | [
"Computes",
"and",
"adjoins",
"histogram",
"along",
"specified",
"dimension",
"(",
"s",
")",
"."
]
| python | train | 36.848485 |
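A hedged usage sketch for hist; the element type, dimensions and bin count are illustrative:

import numpy as np
import holoviews as hv

points = hv.Points(np.random.randn(1000, 2))
# AdjointLayout: the points with histograms adjoined along both dimensions
layout = points.hist(dimension=['x', 'y'], num_bins=30)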
google/mobly | mobly/controllers/monsoon.py | https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/monsoon.py#L334-L385 | def CollectData(self):
"""Return some current samples. Call StartDataCollection() first.
"""
while 1: # loop until we get data or a timeout
_bytes = self._ReadPacket()
if not _bytes:
return None
if len(_bytes) < 4 + 8 + 1 or _bytes[0] < 0x20 or _bytes[0] > 0x2F:
logging.warning("Wanted data, dropped type=0x%02x, len=%d",
_bytes[0], len(_bytes))
continue
seq, _type, x, y = struct.unpack("BBBB", _bytes[:4])
data = [
struct.unpack(">hhhh", _bytes[x:x + 8])
for x in range(4,
len(_bytes) - 8, 8)
]
if self._last_seq and seq & 0xF != (self._last_seq + 1) & 0xF:
logging.warning("Data sequence skipped, lost packet?")
self._last_seq = seq
if _type == 0:
if not self._coarse_scale or not self._fine_scale:
logging.warning(
"Waiting for calibration, dropped data packet.")
continue
out = []
for main, usb, aux, voltage in data:
if main & 1:
coarse = ((main & ~1) - self._coarse_zero)
out.append(coarse * self._coarse_scale)
else:
out.append((main - self._fine_zero) * self._fine_scale)
return out
elif _type == 1:
self._fine_zero = data[0][0]
self._coarse_zero = data[1][0]
elif _type == 2:
self._fine_ref = data[0][0]
self._coarse_ref = data[1][0]
else:
logging.warning("Discarding data packet type=0x%02x", _type)
continue
# See http://wiki/Main/MonsoonProtocol for details on these values.
if self._coarse_ref != self._coarse_zero:
self._coarse_scale = 2.88 / (
self._coarse_ref - self._coarse_zero)
if self._fine_ref != self._fine_zero:
self._fine_scale = 0.0332 / (self._fine_ref - self._fine_zero) | [
"def",
"CollectData",
"(",
"self",
")",
":",
"while",
"1",
":",
"# loop until we get data or a timeout",
"_bytes",
"=",
"self",
".",
"_ReadPacket",
"(",
")",
"if",
"not",
"_bytes",
":",
"return",
"None",
"if",
"len",
"(",
"_bytes",
")",
"<",
"4",
"+",
"8",
"+",
"1",
"or",
"_bytes",
"[",
"0",
"]",
"<",
"0x20",
"or",
"_bytes",
"[",
"0",
"]",
">",
"0x2F",
":",
"logging",
".",
"warning",
"(",
"\"Wanted data, dropped type=0x%02x, len=%d\"",
",",
"_bytes",
"[",
"0",
"]",
",",
"len",
"(",
"_bytes",
")",
")",
"continue",
"seq",
",",
"_type",
",",
"x",
",",
"y",
"=",
"struct",
".",
"unpack",
"(",
"\"BBBB\"",
",",
"_bytes",
"[",
":",
"4",
"]",
")",
"data",
"=",
"[",
"struct",
".",
"unpack",
"(",
"\">hhhh\"",
",",
"_bytes",
"[",
"x",
":",
"x",
"+",
"8",
"]",
")",
"for",
"x",
"in",
"range",
"(",
"4",
",",
"len",
"(",
"_bytes",
")",
"-",
"8",
",",
"8",
")",
"]",
"if",
"self",
".",
"_last_seq",
"and",
"seq",
"&",
"0xF",
"!=",
"(",
"self",
".",
"_last_seq",
"+",
"1",
")",
"&",
"0xF",
":",
"logging",
".",
"warning",
"(",
"\"Data sequence skipped, lost packet?\"",
")",
"self",
".",
"_last_seq",
"=",
"seq",
"if",
"_type",
"==",
"0",
":",
"if",
"not",
"self",
".",
"_coarse_scale",
"or",
"not",
"self",
".",
"_fine_scale",
":",
"logging",
".",
"warning",
"(",
"\"Waiting for calibration, dropped data packet.\"",
")",
"continue",
"out",
"=",
"[",
"]",
"for",
"main",
",",
"usb",
",",
"aux",
",",
"voltage",
"in",
"data",
":",
"if",
"main",
"&",
"1",
":",
"coarse",
"=",
"(",
"(",
"main",
"&",
"~",
"1",
")",
"-",
"self",
".",
"_coarse_zero",
")",
"out",
".",
"append",
"(",
"coarse",
"*",
"self",
".",
"_coarse_scale",
")",
"else",
":",
"out",
".",
"append",
"(",
"(",
"main",
"-",
"self",
".",
"_fine_zero",
")",
"*",
"self",
".",
"_fine_scale",
")",
"return",
"out",
"elif",
"_type",
"==",
"1",
":",
"self",
".",
"_fine_zero",
"=",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
"self",
".",
"_coarse_zero",
"=",
"data",
"[",
"1",
"]",
"[",
"0",
"]",
"elif",
"_type",
"==",
"2",
":",
"self",
".",
"_fine_ref",
"=",
"data",
"[",
"0",
"]",
"[",
"0",
"]",
"self",
".",
"_coarse_ref",
"=",
"data",
"[",
"1",
"]",
"[",
"0",
"]",
"else",
":",
"logging",
".",
"warning",
"(",
"\"Discarding data packet type=0x%02x\"",
",",
"_type",
")",
"continue",
"# See http://wiki/Main/MonsoonProtocol for details on these values.",
"if",
"self",
".",
"_coarse_ref",
"!=",
"self",
".",
"_coarse_zero",
":",
"self",
".",
"_coarse_scale",
"=",
"2.88",
"/",
"(",
"self",
".",
"_coarse_ref",
"-",
"self",
".",
"_coarse_zero",
")",
"if",
"self",
".",
"_fine_ref",
"!=",
"self",
".",
"_fine_zero",
":",
"self",
".",
"_fine_scale",
"=",
"0.0332",
"/",
"(",
"self",
".",
"_fine_ref",
"-",
"self",
".",
"_fine_zero",
")"
]
| Return some current samples. Call StartDataCollection() first. | [
"Return",
"some",
"current",
"samples",
".",
"Call",
"StartDataCollection",
"()",
"first",
"."
]
| python | train | 42.384615 |
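The calibration arithmetic above in isolation: type-1 packets supply zero offsets, type-2 packets supply reference readings, and the resulting scales convert raw sample words into amps. The raw values below are made up for illustration:

coarse_zero, coarse_ref = 120, 3413   # from a type-1 and a type-2 packet
fine_zero, fine_ref = 96, 3042

coarse_scale = 2.88 / (coarse_ref - coarse_zero)
fine_scale = 0.0332 / (fine_ref - fine_zero)

main = 2001                            # raw main-channel sample word
if main & 1:                           # LSB set marks the coarse range
    amps = ((main & ~1) - coarse_zero) * coarse_scale
else:                                  # LSB clear marks the fine range
    amps = (main - fine_zero) * fine_scale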
letuananh/chirptext | chirptext/arsenal.py | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/arsenal.py#L85-L103 | def __retrieve(self, key):
''' Retrieve file location from cache DB
'''
with self.get_conn() as conn:
try:
c = conn.cursor()
if key is None:
c.execute("SELECT value FROM cache_entries WHERE key IS NULL")
else:
c.execute("SELECT value FROM cache_entries WHERE key = ?", (key,))
result = c.fetchone()
if result is None or len(result) != 1:
getLogger().info("There's no entry with key={key}".format(key=key))
return None
else:
return result[0]
except:
getLogger().exception("Cannot retrieve")
return None | [
"def",
"__retrieve",
"(",
"self",
",",
"key",
")",
":",
"with",
"self",
".",
"get_conn",
"(",
")",
"as",
"conn",
":",
"try",
":",
"c",
"=",
"conn",
".",
"cursor",
"(",
")",
"if",
"key",
"is",
"None",
":",
"c",
".",
"execute",
"(",
"\"SELECT value FROM cache_entries WHERE key IS NULL\"",
")",
"else",
":",
"c",
".",
"execute",
"(",
"\"SELECT value FROM cache_entries WHERE key = ?\"",
",",
"(",
"key",
",",
")",
")",
"result",
"=",
"c",
".",
"fetchone",
"(",
")",
"if",
"result",
"is",
"None",
"or",
"len",
"(",
"result",
")",
"!=",
"1",
":",
"getLogger",
"(",
")",
".",
"info",
"(",
"\"There's no entry with key={key}\"",
".",
"format",
"(",
"key",
"=",
"key",
")",
")",
"return",
"None",
"else",
":",
"return",
"result",
"[",
"0",
"]",
"except",
":",
"getLogger",
"(",
")",
".",
"exception",
"(",
"\"Cannot retrieve\"",
")",
"return",
"None"
]
| Retrieve file location from cache DB | [
"Retrieve",
"file",
"location",
"from",
"cache",
"DB"
]
| python | train | 39.947368 |
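The two SQL branches above exist because "key = ?" never matches NULL in SQLite; NULL keys must be queried with "key IS NULL". A self-contained demonstration:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE cache_entries (key TEXT, value TEXT)")
conn.execute("INSERT INTO cache_entries VALUES (NULL, 'default.txt')")

c = conn.execute("SELECT value FROM cache_entries WHERE key = ?", (None,))
print(c.fetchone())   # None: '=' never matches NULL
c = conn.execute("SELECT value FROM cache_entries WHERE key IS NULL")
print(c.fetchone())   # ('default.txt',)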
Robpol86/libnl | example_show_wifi_interface.py | https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/example_show_wifi_interface.py#L168-L180 | def setup_logging():
"""Called when __name__ == '__main__' below. Sets up logging library.
All logging messages go to stderr, from DEBUG to CRITICAL. This script uses print() for regular messages.
"""
fmt = 'DBG<0>%(pathname)s:%(lineno)d %(funcName)s: %(message)s'
handler_stderr = logging.StreamHandler(sys.stderr)
handler_stderr.setFormatter(logging.Formatter(fmt))
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
root_logger.addHandler(handler_stderr) | [
"def",
"setup_logging",
"(",
")",
":",
"fmt",
"=",
"'DBG<0>%(pathname)s:%(lineno)d %(funcName)s: %(message)s'",
"handler_stderr",
"=",
"logging",
".",
"StreamHandler",
"(",
"sys",
".",
"stderr",
")",
"handler_stderr",
".",
"setFormatter",
"(",
"logging",
".",
"Formatter",
"(",
"fmt",
")",
")",
"root_logger",
"=",
"logging",
".",
"getLogger",
"(",
")",
"root_logger",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"root_logger",
".",
"addHandler",
"(",
"handler_stderr",
")"
]
| Called when __name__ == '__main__' below. Sets up logging library.
All logging messages go to stderr, from DEBUG to CRITICAL. This script uses print() for regular messages. | [
"Called",
"when",
"__name__",
"==",
"__main__",
"below",
".",
"Sets",
"up",
"logging",
"library",
"."
]
| python | train | 38.769231 |
dbrattli/OSlash | oslash/ioaction.py | https://github.com/dbrattli/OSlash/blob/ffdc714c5d454f7519f740254de89f70850929eb/oslash/ioaction.py#L153-L159 | def run(self, world: int) -> IO:
"""Run IO Action"""
filename, func = self._get_value()
f = self.open_func(filename)
action = func(f.read())
return action(world=world + 1) | [
"def",
"run",
"(",
"self",
",",
"world",
":",
"int",
")",
"->",
"IO",
":",
"filename",
",",
"func",
"=",
"self",
".",
"_get_value",
"(",
")",
"f",
"=",
"self",
".",
"open_func",
"(",
"filename",
")",
"action",
"=",
"func",
"(",
"f",
".",
"read",
"(",
")",
")",
"return",
"action",
"(",
"world",
"=",
"world",
"+",
"1",
")"
]
| Run IO Action | [
"Run",
"IO",
"Action"
]
| python | train | 29.428571 |
Parquery/icontract | icontract/_recompute.py | https://github.com/Parquery/icontract/blob/846e3187869a9ba790e9b893c98e5055e1cce274/icontract/_recompute.py#L417-L419 | def generic_visit(self, node: ast.AST) -> None:
"""Raise an exception that this node has not been handled."""
raise NotImplementedError("Unhandled recomputation of the node: {} {}".format(type(node), node)) | [
"def",
"generic_visit",
"(",
"self",
",",
"node",
":",
"ast",
".",
"AST",
")",
"->",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"Unhandled recomputation of the node: {} {}\"",
".",
"format",
"(",
"type",
"(",
"node",
")",
",",
"node",
")",
")"
]
| Raise an exception that this node has not been handled. | [
"Raise",
"an",
"exception",
"that",
"this",
"node",
"has",
"not",
"been",
"handled",
"."
]
| python | train | 73.333333 |
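generic_visit is the ast.NodeVisitor fallback for node types without a dedicated visit_* method; overriding it to raise turns a forgotten node type into a loud failure instead of a silent no-op. A minimal sketch of the pattern:

import ast

class StrictVisitor(ast.NodeVisitor):
    def visit_Constant(self, node):   # the only node type this sketch handles
        return node.value

    def generic_visit(self, node):    # everything else fails loudly
        raise NotImplementedError(
            "Unhandled recomputation of the node: {} {}".format(type(node), node))

print(StrictVisitor().visit(ast.parse("42", mode="eval").body))  # 42
# StrictVisitor().visit(ast.parse("x + 1", mode="eval").body) would raise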
saltstack/salt | salt/modules/lxd.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxd.py#L1967-L2040 | def profile_create(name, config=None, devices=None, description=None,
remote_addr=None,
cert=None, key=None, verify_cert=True):
''' Creates a profile.
name :
The name of the profile to get.
config :
A config dict or None (None = unset).
Can also be a list:
[{'key': 'boot.autostart', 'value': 1},
{'key': 'security.privileged', 'value': '1'}]
devices :
A device dict or None (None = unset).
description :
A description string or None (None = unset).
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
        Whether to verify the cert; this is True by default,
        but in most cases you want to turn it off as LXD
        normally uses self-signed certificates.
CLI Examples:
.. code-block:: bash
$ salt '*' lxd.profile_create autostart config="{boot.autostart: 1, boot.autostart.delay: 2, boot.autostart.priority: 1}"
$ salt '*' lxd.profile_create shared_mounts devices="{shared_mount: {type: 'disk', source: '/home/shared', path: '/home/shared'}}"
See the `lxd-docs`_ for the details about the config and devices dicts.
.. _lxd-docs: https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-10
'''
client = pylxd_client_get(remote_addr, cert, key, verify_cert)
config, devices = normalize_input_values(
config,
devices
)
try:
profile = client.profiles.create(name, config, devices)
except pylxd.exceptions.LXDAPIException as e:
raise CommandExecutionError(six.text_type(e))
if description is not None:
profile.description = description
pylxd_save_object(profile)
return _pylxd_model_to_dict(profile) | [
"def",
"profile_create",
"(",
"name",
",",
"config",
"=",
"None",
",",
"devices",
"=",
"None",
",",
"description",
"=",
"None",
",",
"remote_addr",
"=",
"None",
",",
"cert",
"=",
"None",
",",
"key",
"=",
"None",
",",
"verify_cert",
"=",
"True",
")",
":",
"client",
"=",
"pylxd_client_get",
"(",
"remote_addr",
",",
"cert",
",",
"key",
",",
"verify_cert",
")",
"config",
",",
"devices",
"=",
"normalize_input_values",
"(",
"config",
",",
"devices",
")",
"try",
":",
"profile",
"=",
"client",
".",
"profiles",
".",
"create",
"(",
"name",
",",
"config",
",",
"devices",
")",
"except",
"pylxd",
".",
"exceptions",
".",
"LXDAPIException",
"as",
"e",
":",
"raise",
"CommandExecutionError",
"(",
"six",
".",
"text_type",
"(",
"e",
")",
")",
"if",
"description",
"is",
"not",
"None",
":",
"profile",
".",
"description",
"=",
"description",
"pylxd_save_object",
"(",
"profile",
")",
"return",
"_pylxd_model_to_dict",
"(",
"profile",
")"
]
| Creates a profile.
name :
The name of the profile to get.
config :
A config dict or None (None = unset).
Can also be a list:
[{'key': 'boot.autostart', 'value': 1},
{'key': 'security.privileged', 'value': '1'}]
devices :
A device dict or None (None = unset).
description :
A description string or None (None = unset).
remote_addr :
An URL to a remote Server, you also have to give cert and key if
you provide remote_addr and its a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert; this is True by default,
but in most cases you want to turn it off as LXD
normally uses self-signed certificates.
CLI Examples:
.. code-block:: bash
$ salt '*' lxd.profile_create autostart config="{boot.autostart: 1, boot.autostart.delay: 2, boot.autostart.priority: 1}"
$ salt '*' lxd.profile_create shared_mounts devices="{shared_mount: {type: 'disk', source: '/home/shared', path: '/home/shared'}}"
See the `lxd-docs`_ for the details about the config and devices dicts.
.. _lxd-docs: https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-10 | [
"Creates",
"a",
"profile",
"."
]
| python | train | 30.324324 |
alexandrovteam/cpyMSpec | cpyMSpec/spectrum.py | https://github.com/alexandrovteam/cpyMSpec/blob/99d9ea18036d65d2d76744102e9304c5df67a1e1/cpyMSpec/spectrum.py#L159-L175 | def centroids(self, window_size=5):
"""
Detects peaks in raw data.
:param mzs: sorted array of m/z values
:param intensities: array of corresponding intensities
:param window_size: size of m/z averaging window
:returns: isotope pattern containing the centroids
:rtype: CentroidedSpectrum
"""
self.sortByMass()
mzs = _cffi_buffer(self.masses, 'd')
intensities = _cffi_buffer(self.intensities, 'f')
n = self.size
p = ims.spectrum_new_from_raw(n, mzs.ptr, intensities.ptr, int(window_size))
return _new_spectrum(CentroidedSpectrum, p) | [
"def",
"centroids",
"(",
"self",
",",
"window_size",
"=",
"5",
")",
":",
"self",
".",
"sortByMass",
"(",
")",
"mzs",
"=",
"_cffi_buffer",
"(",
"self",
".",
"masses",
",",
"'d'",
")",
"intensities",
"=",
"_cffi_buffer",
"(",
"self",
".",
"intensities",
",",
"'f'",
")",
"n",
"=",
"self",
".",
"size",
"p",
"=",
"ims",
".",
"spectrum_new_from_raw",
"(",
"n",
",",
"mzs",
".",
"ptr",
",",
"intensities",
".",
"ptr",
",",
"int",
"(",
"window_size",
")",
")",
"return",
"_new_spectrum",
"(",
"CentroidedSpectrum",
",",
"p",
")"
]
| Detects peaks in raw data.
:param mzs: sorted array of m/z values
:param intensities: array of corresponding intensities
:param window_size: size of m/z averaging window
:returns: isotope pattern containing the centroids
:rtype: CentroidedSpectrum | [
"Detects",
"peaks",
"in",
"raw",
"data",
"."
]
| python | train | 37 |
pyvisa/pyvisa-sim | pyvisa-sim/devices.py | https://github.com/pyvisa/pyvisa-sim/blob/9836166b6b57c165fc63a276f87fe81f106a4e26/pyvisa-sim/devices.py#L214-L250 | def write(self, data):
"""Write data into the device input buffer.
:param data: single element byte
:type data: bytes
"""
logger.debug('Writing into device input buffer: %r' % data)
if not isinstance(data, bytes):
raise TypeError('data must be an instance of bytes')
if len(data) != 1:
msg = 'data must have a length of 1, not %d'
raise ValueError(msg % len(data))
self._input_buffer.extend(data)
l = len(self._query_eom)
if not self._input_buffer.endswith(self._query_eom):
return
try:
message = bytes(self._input_buffer[:-l])
queries = (message.split(self.delimiter) if self.delimiter
else [message])
for query in queries:
response = self._match(query)
eom = self._response_eom
if response is None:
response = self.error_response('command_error')
if response is not NoResponse:
self._output_buffer.extend(response)
self._output_buffer.extend(eom)
finally:
self._input_buffer = bytearray() | [
"def",
"write",
"(",
"self",
",",
"data",
")",
":",
"logger",
".",
"debug",
"(",
"'Writing into device input buffer: %r'",
"%",
"data",
")",
"if",
"not",
"isinstance",
"(",
"data",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"(",
"'data must be an instance of bytes'",
")",
"if",
"len",
"(",
"data",
")",
"!=",
"1",
":",
"msg",
"=",
"'data must have a length of 1, not %d'",
"raise",
"ValueError",
"(",
"msg",
"%",
"len",
"(",
"data",
")",
")",
"self",
".",
"_input_buffer",
".",
"extend",
"(",
"data",
")",
"l",
"=",
"len",
"(",
"self",
".",
"_query_eom",
")",
"if",
"not",
"self",
".",
"_input_buffer",
".",
"endswith",
"(",
"self",
".",
"_query_eom",
")",
":",
"return",
"try",
":",
"message",
"=",
"bytes",
"(",
"self",
".",
"_input_buffer",
"[",
":",
"-",
"l",
"]",
")",
"queries",
"=",
"(",
"message",
".",
"split",
"(",
"self",
".",
"delimiter",
")",
"if",
"self",
".",
"delimiter",
"else",
"[",
"message",
"]",
")",
"for",
"query",
"in",
"queries",
":",
"response",
"=",
"self",
".",
"_match",
"(",
"query",
")",
"eom",
"=",
"self",
".",
"_response_eom",
"if",
"response",
"is",
"None",
":",
"response",
"=",
"self",
".",
"error_response",
"(",
"'command_error'",
")",
"if",
"response",
"is",
"not",
"NoResponse",
":",
"self",
".",
"_output_buffer",
".",
"extend",
"(",
"response",
")",
"self",
".",
"_output_buffer",
".",
"extend",
"(",
"eom",
")",
"finally",
":",
"self",
".",
"_input_buffer",
"=",
"bytearray",
"(",
")"
]
| Write data into the device input buffer.
:param data: single element byte
:type data: bytes | [
"Write",
"data",
"into",
"the",
"device",
"input",
"buffer",
"."
]
| python | train | 32.459459 |
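
The `write` above accepts exactly one byte per call and only dispatches a query once the query EOM sequence has accumulated. A minimal sketch (the `device` instance and the b'\n' terminator are assumptions, not part of the entry):

query = b'*IDN?\n'                    # assumes the simulated device's query EOM is b'\n'
for i in range(len(query)):
    device.write(query[i:i + 1])      # exactly one byte per call, else an error is raised
# once the EOM arrives, the matched response plus the response EOM is queued
# in device._output_buffer for the read side to drain
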
ladybug-tools/ladybug | ladybug/analysisperiod.py | https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/analysisperiod.py#L357-L375 | def is_time_included(self, time):
"""Check if time is included in analysis period.
Return True if time is inside this analysis period,
otherwise return False
Args:
time: A DateTime to be tested
Returns:
A boolean. True if time is included in analysis period
"""
if self._timestamps_data is None:
self._calculate_timestamps()
# time filtering in Ladybug Tools is slightly different than "normal"
# filtering since start hour and end hour will be applied for every day.
# For instance 2/20 9am to 2/22 5pm means hour between 9-17
# during 20, 21 and 22 of Feb.
return time.moy in self._timestamps_data | [
"def",
"is_time_included",
"(",
"self",
",",
"time",
")",
":",
"if",
"self",
".",
"_timestamps_data",
"is",
"None",
":",
"self",
".",
"_calculate_timestamps",
"(",
")",
"# time filtering in Ladybug Tools is slightly different than \"normal\"",
"# filtering since start hour and end hour will be applied for every day.",
"# For instance 2/20 9am to 2/22 5pm means hour between 9-17",
"# during 20, 21 and 22 of Feb.",
"return",
"time",
".",
"moy",
"in",
"self",
".",
"_timestamps_data"
]
| Check if time is included in analysis period.
Return True if time is inside this analysis period,
otherwise return False
Args:
time: A DateTime to be tested
Returns:
A boolean. True if time is included in analysis period | [
"Check",
"if",
"time",
"is",
"included",
"in",
"analysis",
"period",
"."
]
| python | train | 37.684211 |
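
To make the comment about Ladybug-style time filtering concrete: a period of 2/20 9am to 2/22 5pm accepts hours 9-17 on each of the three days. A sketch, assuming the usual `AnalysisPeriod` and `DateTime` constructors from this library:

from ladybug.analysisperiod import AnalysisPeriod
from ladybug.dt import DateTime

ap = AnalysisPeriod(st_month=2, st_day=20, st_hour=9,
                    end_month=2, end_day=22, end_hour=17)
print(ap.is_time_included(DateTime(2, 21, 12)))   # True: noon on Feb 21
print(ap.is_time_included(DateTime(2, 21, 20)))   # False: 8pm falls outside 9-17
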
cgrok/clashroyale | clashroyale/official_api/client.py | https://github.com/cgrok/clashroyale/blob/2618f4da22a84ad3e36d2446e23436d87c423163/clashroyale/official_api/client.py#L450-L459 | def get_all_locations(self, timeout: int=None):
"""Get a list of all locations
Parameters
----------
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.LOCATIONS
return self._get_model(url, timeout=timeout) | [
"def",
"get_all_locations",
"(",
"self",
",",
"timeout",
":",
"int",
"=",
"None",
")",
":",
"url",
"=",
"self",
".",
"api",
".",
"LOCATIONS",
"return",
"self",
".",
"_get_model",
"(",
"url",
",",
"timeout",
"=",
"timeout",
")"
]
| Get a list of all locations
Parameters
----------
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout | [
"Get",
"a",
"list",
"of",
"all",
"locations"
]
| python | valid | 31 |
tamasgal/km3pipe | km3pipe/db.py | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L73-L76 | def read_csv(text, sep="\t"):
"""Create a DataFrame from CSV text"""
import pandas as pd # no top level load to make a faster import of db
return pd.read_csv(StringIO(text), sep=sep) | [
    "def",
    "read_csv",
    "(",
    "text",
    ",",
    "sep",
    "=",
    "\"\\t\"",
    ")",
    ":",
    "import",
    "pandas",
    "as",
    "pd",
    "# no top level load to make a faster import of db",
    "return",
    "pd",
    ".",
    "read_csv",
    "(",
    "StringIO",
    "(",
    "text",
    ")",
    ",",
    "sep",
    "=",
    "sep",
")"
]
| Create a DataFrame from CSV text | [
"Create",
"a",
"DataFrame",
"from",
"CSV",
"text"
]
| python | train | 48.75 |
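
A quick illustration of the helper (plain pandas, no database connection needed):

text = "a\tb\n1\t2\n3\t4\n"
df = read_csv(text)          # tab-separated by default
print(df["b"].sum())         # -> 6
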
cs50/check50 | check50/runner.py | https://github.com/cs50/check50/blob/42c1f0c36baa6a24f69742d74551a9ea7a5ceb33/check50/runner.py#L178-L212 | def run(self, files, working_area):
"""
Run checks concurrently.
Returns a list of CheckResults ordered by declaration order of the checks in the imported module
"""
# Ensure that dictionary is ordered by check declaration order (via self.check_names)
# NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict.
results = {name: None for name in self.check_names}
checks_root = working_area.parent
with futures.ProcessPoolExecutor() as executor:
# Start all checks that have no dependencies
not_done = set(executor.submit(run_check(name, self.checks_spec, checks_root))
for name, _ in self.child_map[None])
not_passed = []
while not_done:
done, not_done = futures.wait(not_done, return_when=futures.FIRST_COMPLETED)
for future in done:
# Get result from completed check
result, state = future.result()
results[result.name] = result
if result.passed:
# Dispatch dependent checks
for child_name, _ in self.child_map[result.name]:
not_done.add(executor.submit(
run_check(child_name, self.checks_spec, checks_root, state)))
else:
not_passed.append(result.name)
for name in not_passed:
self._skip_children(name, results)
return results.values() | [
"def",
"run",
"(",
"self",
",",
"files",
",",
"working_area",
")",
":",
"# Ensure that dictionary is ordered by check declaration order (via self.check_names)",
"# NOTE: Requires CPython 3.6. If we need to support older versions of Python, replace with OrderedDict.",
"results",
"=",
"{",
"name",
":",
"None",
"for",
"name",
"in",
"self",
".",
"check_names",
"}",
"checks_root",
"=",
"working_area",
".",
"parent",
"with",
"futures",
".",
"ProcessPoolExecutor",
"(",
")",
"as",
"executor",
":",
"# Start all checks that have no dependencies",
"not_done",
"=",
"set",
"(",
"executor",
".",
"submit",
"(",
"run_check",
"(",
"name",
",",
"self",
".",
"checks_spec",
",",
"checks_root",
")",
")",
"for",
"name",
",",
"_",
"in",
"self",
".",
"child_map",
"[",
"None",
"]",
")",
"not_passed",
"=",
"[",
"]",
"while",
"not_done",
":",
"done",
",",
"not_done",
"=",
"futures",
".",
"wait",
"(",
"not_done",
",",
"return_when",
"=",
"futures",
".",
"FIRST_COMPLETED",
")",
"for",
"future",
"in",
"done",
":",
"# Get result from completed check",
"result",
",",
"state",
"=",
"future",
".",
"result",
"(",
")",
"results",
"[",
"result",
".",
"name",
"]",
"=",
"result",
"if",
"result",
".",
"passed",
":",
"# Dispatch dependent checks",
"for",
"child_name",
",",
"_",
"in",
"self",
".",
"child_map",
"[",
"result",
".",
"name",
"]",
":",
"not_done",
".",
"add",
"(",
"executor",
".",
"submit",
"(",
"run_check",
"(",
"child_name",
",",
"self",
".",
"checks_spec",
",",
"checks_root",
",",
"state",
")",
")",
")",
"else",
":",
"not_passed",
".",
"append",
"(",
"result",
".",
"name",
")",
"for",
"name",
"in",
"not_passed",
":",
"self",
".",
"_skip_children",
"(",
"name",
",",
"results",
")",
"return",
"results",
".",
"values",
"(",
")"
]
| Run checks concurrently.
Returns a list of CheckResults ordered by declaration order of the checks in the imported module | [
"Run",
"checks",
"concurrently",
".",
"Returns",
"a",
"list",
"of",
"CheckResults",
"ordered",
"by",
"declaration",
"order",
"of",
"the",
"checks",
"in",
"the",
"imported",
"module"
]
| python | train | 45.514286 |
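
The dispatch pattern above (seed the pool with dependency-free checks, then submit children only when a parent passes) can be isolated in a small generic sketch; the names here are illustrative, not check50's API, and a thread pool stands in for the process pool:

from concurrent import futures

child_map = {None: ["a"], "a": ["b", "c"], "b": [], "c": []}  # toy dependency tree

def run_check(name):
    return name, True                      # pretend every check passes

with futures.ThreadPoolExecutor() as executor:
    not_done = {executor.submit(run_check, n) for n in child_map[None]}
    while not_done:
        done, not_done = futures.wait(not_done,
                                      return_when=futures.FIRST_COMPLETED)
        for future in done:
            name, passed = future.result()
            if passed:                     # dispatch dependents of passing checks
                not_done |= {executor.submit(run_check, c)
                             for c in child_map[name]}
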
sorgerlab/indra | indra/util/__init__.py | https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/util/__init__.py#L113-L141 | def read_unicode_csv_fileobj(fileobj, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
encoding='utf-8', skiprows=0):
"""fileobj can be a StringIO in Py3, but should be a BytesIO in Py2."""
# Python 3 version
if sys.version_info[0] >= 3:
# Next, get the csv reader, with unicode delimiter and quotechar
csv_reader = csv.reader(fileobj, delimiter=delimiter,
quotechar=quotechar, quoting=quoting,
lineterminator=lineterminator)
# Now, return the (already decoded) unicode csv_reader generator
# Skip rows if necessary
for skip_ix in range(skiprows):
next(csv_reader)
for row in csv_reader:
yield row
# Python 2 version
else:
# Next, get the csv reader, passing delimiter and quotechar as
# bytestrings rather than unicode
csv_reader = csv.reader(fileobj, delimiter=delimiter.encode(encoding),
quotechar=quotechar.encode(encoding),
quoting=quoting, lineterminator=lineterminator)
# Iterate over the file and decode each string into unicode
# Skip rows if necessary
for skip_ix in range(skiprows):
next(csv_reader)
for row in csv_reader:
yield [cell.decode(encoding) for cell in row] | [
"def",
"read_unicode_csv_fileobj",
"(",
"fileobj",
",",
"delimiter",
"=",
"','",
",",
"quotechar",
"=",
"'\"'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
",",
"lineterminator",
"=",
"'\\n'",
",",
"encoding",
"=",
"'utf-8'",
",",
"skiprows",
"=",
"0",
")",
":",
"# Python 3 version",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
":",
"# Next, get the csv reader, with unicode delimiter and quotechar",
"csv_reader",
"=",
"csv",
".",
"reader",
"(",
"fileobj",
",",
"delimiter",
"=",
"delimiter",
",",
"quotechar",
"=",
"quotechar",
",",
"quoting",
"=",
"quoting",
",",
"lineterminator",
"=",
"lineterminator",
")",
"# Now, return the (already decoded) unicode csv_reader generator",
"# Skip rows if necessary",
"for",
"skip_ix",
"in",
"range",
"(",
"skiprows",
")",
":",
"next",
"(",
"csv_reader",
")",
"for",
"row",
"in",
"csv_reader",
":",
"yield",
"row",
"# Python 2 version",
"else",
":",
"# Next, get the csv reader, passing delimiter and quotechar as",
"# bytestrings rather than unicode",
"csv_reader",
"=",
"csv",
".",
"reader",
"(",
"fileobj",
",",
"delimiter",
"=",
"delimiter",
".",
"encode",
"(",
"encoding",
")",
",",
"quotechar",
"=",
"quotechar",
".",
"encode",
"(",
"encoding",
")",
",",
"quoting",
"=",
"quoting",
",",
"lineterminator",
"=",
"lineterminator",
")",
"# Iterate over the file and decode each string into unicode",
"# Skip rows if necessary",
"for",
"skip_ix",
"in",
"range",
"(",
"skiprows",
")",
":",
"next",
"(",
"csv_reader",
")",
"for",
"row",
"in",
"csv_reader",
":",
"yield",
"[",
"cell",
".",
"decode",
"(",
"encoding",
")",
"for",
"cell",
"in",
"row",
"]"
]
| fileobj can be a StringIO in Py3, but should be a BytesIO in Py2. | [
"fileobj",
"can",
"be",
"a",
"StringIO",
"in",
"Py3",
"but",
"should",
"be",
"a",
"BytesIO",
"in",
"Py2",
"."
]
| python | train | 49.37931 |
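
Usage sketch, honoring the docstring's StringIO-versus-BytesIO requirement:

import sys
from io import BytesIO, StringIO

data = u"gene\tvalue\nTP53\t1\n"
fileobj = StringIO(data) if sys.version_info[0] >= 3 else BytesIO(data.encode('utf-8'))
for row in read_unicode_csv_fileobj(fileobj, delimiter='\t'):
    print(row)                # ['gene', 'value'] then ['TP53', '1']
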
odlgroup/odl | odl/space/npy_tensors.py | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/npy_tensors.py#L2010-L2024 | def _pnorm_diagweight(x, p, w):
"""Diagonally weighted p-norm implementation."""
# Ravel both in the same order (w is a numpy array)
order = 'F' if all(a.flags.f_contiguous for a in (x.data, w)) else 'C'
# This is faster than first applying the weights and then summing with
# BLAS dot or nrm2
xp = np.abs(x.data.ravel(order))
if p == float('inf'):
xp *= w.ravel(order)
return np.max(xp)
else:
xp = np.power(xp, p, out=xp)
xp *= w.ravel(order)
return np.sum(xp) ** (1 / p) | [
"def",
"_pnorm_diagweight",
"(",
"x",
",",
"p",
",",
"w",
")",
":",
"# Ravel both in the same order (w is a numpy array)",
"order",
"=",
"'F'",
"if",
"all",
"(",
"a",
".",
"flags",
".",
"f_contiguous",
"for",
"a",
"in",
"(",
"x",
".",
"data",
",",
"w",
")",
")",
"else",
"'C'",
"# This is faster than first applying the weights and then summing with",
"# BLAS dot or nrm2",
"xp",
"=",
"np",
".",
"abs",
"(",
"x",
".",
"data",
".",
"ravel",
"(",
"order",
")",
")",
"if",
"p",
"==",
"float",
"(",
"'inf'",
")",
":",
"xp",
"*=",
"w",
".",
"ravel",
"(",
"order",
")",
"return",
"np",
".",
"max",
"(",
"xp",
")",
"else",
":",
"xp",
"=",
"np",
".",
"power",
"(",
"xp",
",",
"p",
",",
"out",
"=",
"xp",
")",
"xp",
"*=",
"w",
".",
"ravel",
"(",
"order",
")",
"return",
"np",
".",
"sum",
"(",
"xp",
")",
"**",
"(",
"1",
"/",
"p",
")"
]
| Diagonally weighted p-norm implementation. | [
"Diagonally",
"weighted",
"p",
"-",
"norm",
"implementation",
"."
]
| python | train | 35.4 |
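
The routine computes the diagonally weighted p-norm (sum_i w_i |x_i|**p)**(1/p), or max_i w_i |x_i| for p = inf. A plain-NumPy restatement of the formula, useful for checking small cases:

import numpy as np

def pnorm_diagweight_ref(x, p, w):
    # reference implementation of (sum_i w_i * |x_i|**p) ** (1/p)
    xp = np.abs(np.ravel(x))
    if p == float('inf'):
        return np.max(w.ravel() * xp)
    return np.sum(w.ravel() * xp ** p) ** (1 / p)

print(pnorm_diagweight_ref(np.array([3.0, -4.0]), 2, np.ones(2)))  # 5.0
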
michael-lazar/rtv | rtv/packages/praw/__init__.py | https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/__init__.py#L1754-L1790 | def upload_image(self, subreddit, image_path, name=None,
header=False, upload_as=None):
"""Upload an image to the subreddit.
:param image_path: A path to the jpg or png image you want to upload.
:param name: The name to provide the image. When None the name will be
filename less any extension.
:param header: When True, upload the image as the subreddit header.
:param upload_as: Must be `'jpg'`, `'png'` or `None`. When None, this
will match the format of the image itself. In all cases where both
this value and the image format is not png, reddit will also
convert the image mode to RGBA. reddit optimizes the image
according to this value.
:returns: A link to the uploaded image. Raises an exception otherwise.
"""
if name and header:
raise TypeError('Both name and header cannot be set.')
if upload_as not in (None, 'png', 'jpg'):
raise TypeError("upload_as must be 'jpg', 'png', or None.")
with open(image_path, 'rb') as image:
image_type = upload_as or _image_type(image)
data = {'r': six.text_type(subreddit), 'img_type': image_type}
if header:
data['header'] = 1
else:
if not name:
name = os.path.splitext(os.path.basename(image.name))[0]
data['name'] = name
response = json.loads(self._request(
self.config['upload_image'], data=data, files={'file': image},
method=to_native_string('POST'), retry_on_error=False))
if response['errors']:
raise errors.APIException(response['errors'], None)
return response['img_src'] | [
"def",
"upload_image",
"(",
"self",
",",
"subreddit",
",",
"image_path",
",",
"name",
"=",
"None",
",",
"header",
"=",
"False",
",",
"upload_as",
"=",
"None",
")",
":",
"if",
"name",
"and",
"header",
":",
"raise",
"TypeError",
"(",
"'Both name and header cannot be set.'",
")",
"if",
"upload_as",
"not",
"in",
"(",
"None",
",",
"'png'",
",",
"'jpg'",
")",
":",
"raise",
"TypeError",
"(",
"\"upload_as must be 'jpg', 'png', or None.\"",
")",
"with",
"open",
"(",
"image_path",
",",
"'rb'",
")",
"as",
"image",
":",
"image_type",
"=",
"upload_as",
"or",
"_image_type",
"(",
"image",
")",
"data",
"=",
"{",
"'r'",
":",
"six",
".",
"text_type",
"(",
"subreddit",
")",
",",
"'img_type'",
":",
"image_type",
"}",
"if",
"header",
":",
"data",
"[",
"'header'",
"]",
"=",
"1",
"else",
":",
"if",
"not",
"name",
":",
"name",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"image",
".",
"name",
")",
")",
"[",
"0",
"]",
"data",
"[",
"'name'",
"]",
"=",
"name",
"response",
"=",
"json",
".",
"loads",
"(",
"self",
".",
"_request",
"(",
"self",
".",
"config",
"[",
"'upload_image'",
"]",
",",
"data",
"=",
"data",
",",
"files",
"=",
"{",
"'file'",
":",
"image",
"}",
",",
"method",
"=",
"to_native_string",
"(",
"'POST'",
")",
",",
"retry_on_error",
"=",
"False",
")",
")",
"if",
"response",
"[",
"'errors'",
"]",
":",
"raise",
"errors",
".",
"APIException",
"(",
"response",
"[",
"'errors'",
"]",
",",
"None",
")",
"return",
"response",
"[",
"'img_src'",
"]"
]
| Upload an image to the subreddit.
:param image_path: A path to the jpg or png image you want to upload.
:param name: The name to provide the image. When None the name will be
filename less any extension.
:param header: When True, upload the image as the subreddit header.
:param upload_as: Must be `'jpg'`, `'png'` or `None`. When None, this
will match the format of the image itself. In all cases where both
this value and the image format is not png, reddit will also
convert the image mode to RGBA. reddit optimizes the image
according to this value.
:returns: A link to the uploaded image. Raises an exception otherwise. | [
"Upload",
"an",
"image",
"to",
"the",
"subreddit",
"."
]
| python | train | 47.783784 |
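
Hedged usage sketch (`r` stands for an authenticated reddit session object exposing this method; the subreddit and path are illustrative):

url = r.upload_image('mysubreddit', '/tmp/banner.png', header=True)
print(url)   # link to the stored image; raises errors.APIException on failure
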
Dav0815/TransportNSW | TransportNSW/TransportNSW.py | https://github.com/Dav0815/TransportNSW/blob/828aae948fd26bb2ce89637ed639129b4cfdf62a/TransportNSW/TransportNSW.py#L36-L115 | def get_departures(self, stop_id, route, destination, api_key):
"""Get the latest data from Transport NSW."""
self.stop_id = stop_id
self.route = route
self.destination = destination
self.api_key = api_key
# Build the URL including the STOP_ID and the API key
url = \
'https://api.transport.nsw.gov.au/v1/tp/departure_mon?' \
'outputFormat=rapidJSON&coordOutputFormat=EPSG%3A4326&' \
'mode=direct&type_dm=stop&name_dm=' \
+ self.stop_id \
+ '&departureMonitorMacro=true&TfNSWDM=true&version=10.2.1.42'
auth = 'apikey ' + self.api_key
header = {'Accept': 'application/json', 'Authorization': auth}
# Send query or return error
try:
response = requests.get(url, headers=header, timeout=10)
except:
logger.warning("Network or Timeout error")
return self.info
# If there is no valid request
if response.status_code != 200:
logger.warning("Error with the request sent; check api key")
return self.info
# Parse the result as a JSON object
result = response.json()
# If there is no stop events for the query
try:
result['stopEvents']
except KeyError:
logger.warning("No stop events for this query")
return self.info
# Set variables
maxresults = 1
monitor = []
if self.destination != '':
for i in range(len(result['stopEvents'])):
destination = result['stopEvents'][i]['transportation']['destination']['name']
if destination == self.destination:
event = self.parseEvent(result, i)
if event != None:
monitor.append(event)
if len(monitor) >= maxresults:
# We found enough results, lets stop
break
elif self.route != '':
# Find the next stop events for a specific route
for i in range(len(result['stopEvents'])):
number = result['stopEvents'][i]['transportation']['number']
if number == self.route:
event = self.parseEvent(result, i)
if event != None:
monitor.append(event)
if len(monitor) >= maxresults:
# We found enough results, lets stop
break
else:
# No route defined, find any route leaving next
for i in range(0, maxresults):
event = self.parseEvent(result, i)
if event != None:
monitor.append(event)
if monitor:
self.info = {
ATTR_STOP_ID: self.stop_id,
ATTR_ROUTE: monitor[0][0],
ATTR_DUE_IN: monitor[0][1],
ATTR_DELAY: monitor[0][2],
ATTR_REALTIME: monitor[0][5],
ATTR_DESTINATION: monitor[0][6],
ATTR_MODE: monitor[0][7]
}
return self.info | [
"def",
"get_departures",
"(",
"self",
",",
"stop_id",
",",
"route",
",",
"destination",
",",
"api_key",
")",
":",
"self",
".",
"stop_id",
"=",
"stop_id",
"self",
".",
"route",
"=",
"route",
"self",
".",
"destination",
"=",
"destination",
"self",
".",
"api_key",
"=",
"api_key",
"# Build the URL including the STOP_ID and the API key",
"url",
"=",
"'https://api.transport.nsw.gov.au/v1/tp/departure_mon?'",
"'outputFormat=rapidJSON&coordOutputFormat=EPSG%3A4326&'",
"'mode=direct&type_dm=stop&name_dm='",
"+",
"self",
".",
"stop_id",
"+",
"'&departureMonitorMacro=true&TfNSWDM=true&version=10.2.1.42'",
"auth",
"=",
"'apikey '",
"+",
"self",
".",
"api_key",
"header",
"=",
"{",
"'Accept'",
":",
"'application/json'",
",",
"'Authorization'",
":",
"auth",
"}",
"# Send query or return error",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"header",
",",
"timeout",
"=",
"10",
")",
"except",
":",
"logger",
".",
"warning",
"(",
"\"Network or Timeout error\"",
")",
"return",
"self",
".",
"info",
"# If there is no valid request",
"if",
"response",
".",
"status_code",
"!=",
"200",
":",
"logger",
".",
"warning",
"(",
"\"Error with the request sent; check api key\"",
")",
"return",
"self",
".",
"info",
"# Parse the result as a JSON object",
"result",
"=",
"response",
".",
"json",
"(",
")",
"# If there is no stop events for the query",
"try",
":",
"result",
"[",
"'stopEvents'",
"]",
"except",
"KeyError",
":",
"logger",
".",
"warning",
"(",
"\"No stop events for this query\"",
")",
"return",
"self",
".",
"info",
"# Set variables",
"maxresults",
"=",
"1",
"monitor",
"=",
"[",
"]",
"if",
"self",
".",
"destination",
"!=",
"''",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
"[",
"'stopEvents'",
"]",
")",
")",
":",
"destination",
"=",
"result",
"[",
"'stopEvents'",
"]",
"[",
"i",
"]",
"[",
"'transportation'",
"]",
"[",
"'destination'",
"]",
"[",
"'name'",
"]",
"if",
"destination",
"==",
"self",
".",
"destination",
":",
"event",
"=",
"self",
".",
"parseEvent",
"(",
"result",
",",
"i",
")",
"if",
"event",
"!=",
"None",
":",
"monitor",
".",
"append",
"(",
"event",
")",
"if",
"len",
"(",
"monitor",
")",
">=",
"maxresults",
":",
"# We found enough results, lets stop",
"break",
"elif",
"self",
".",
"route",
"!=",
"''",
":",
"# Find the next stop events for a specific route",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"result",
"[",
"'stopEvents'",
"]",
")",
")",
":",
"number",
"=",
"result",
"[",
"'stopEvents'",
"]",
"[",
"i",
"]",
"[",
"'transportation'",
"]",
"[",
"'number'",
"]",
"if",
"number",
"==",
"self",
".",
"route",
":",
"event",
"=",
"self",
".",
"parseEvent",
"(",
"result",
",",
"i",
")",
"if",
"event",
"!=",
"None",
":",
"monitor",
".",
"append",
"(",
"event",
")",
"if",
"len",
"(",
"monitor",
")",
">=",
"maxresults",
":",
"# We found enough results, lets stop",
"break",
"else",
":",
"# No route defined, find any route leaving next",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"maxresults",
")",
":",
"event",
"=",
"self",
".",
"parseEvent",
"(",
"result",
",",
"i",
")",
"if",
"event",
"!=",
"None",
":",
"monitor",
".",
"append",
"(",
"event",
")",
"if",
"monitor",
":",
"self",
".",
"info",
"=",
"{",
"ATTR_STOP_ID",
":",
"self",
".",
"stop_id",
",",
"ATTR_ROUTE",
":",
"monitor",
"[",
"0",
"]",
"[",
"0",
"]",
",",
"ATTR_DUE_IN",
":",
"monitor",
"[",
"0",
"]",
"[",
"1",
"]",
",",
"ATTR_DELAY",
":",
"monitor",
"[",
"0",
"]",
"[",
"2",
"]",
",",
"ATTR_REALTIME",
":",
"monitor",
"[",
"0",
"]",
"[",
"5",
"]",
",",
"ATTR_DESTINATION",
":",
"monitor",
"[",
"0",
"]",
"[",
"6",
"]",
",",
"ATTR_MODE",
":",
"monitor",
"[",
"0",
"]",
"[",
"7",
"]",
"}",
"return",
"self",
".",
"info"
]
| Get the latest data from Transport NSW. | [
"Get",
"the",
"latest",
"data",
"from",
"Transport",
"NSW",
"."
]
| python | train | 38.975 |
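
Hedged usage sketch (the constructor and stop id are assumptions; the returned dict's keys mirror the ATTR_* constants referenced above):

tnsw = TransportNSW()                       # assumed constructor from this module
journey = tnsw.get_departures('209516',     # stop id
                              '',           # any route
                              '',           # any destination
                              'your-opendata-api-key')
print(journey)                              # stop id, route, due-in, delay, ...
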
Alignak-monitoring/alignak | alignak/objects/satellitelink.py | https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/satellitelink.py#L1005-L1013 | def linkify(self, modules):
"""Link modules and Satellite links
:param modules: Module object list
:type modules: alignak.objects.module.Modules
:return: None
"""
logger.debug("Linkify %s with %s", self, modules)
self.linkify_s_by_module(modules) | [
"def",
"linkify",
"(",
"self",
",",
"modules",
")",
":",
"logger",
".",
"debug",
"(",
"\"Linkify %s with %s\"",
",",
"self",
",",
"modules",
")",
"self",
".",
"linkify_s_by_module",
"(",
"modules",
")"
]
| Link modules and Satellite links
:param modules: Module object list
:type modules: alignak.objects.module.Modules
:return: None | [
"Link",
"modules",
"and",
"Satellite",
"links"
]
| python | train | 32.777778 |
keras-rl/keras-rl | rl/memory.py | https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/memory.py#L120-L144 | def get_recent_state(self, current_observation):
"""Return list of last observations
# Argument
current_observation (object): Last observation
# Returns
A list of the last observations
"""
# This code is slightly complicated by the fact that subsequent observations might be
# from different episodes. We ensure that an experience never spans multiple episodes.
# This is probably not that important in practice but it seems cleaner.
state = [current_observation]
idx = len(self.recent_observations) - 1
for offset in range(0, self.window_length - 1):
current_idx = idx - offset
current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False
if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal):
# The previously handled observation was terminal, don't add the current one.
# Otherwise we would leak into a different episode.
break
state.insert(0, self.recent_observations[current_idx])
while len(state) < self.window_length:
state.insert(0, zeroed_observation(state[0]))
return state | [
"def",
"get_recent_state",
"(",
"self",
",",
"current_observation",
")",
":",
"# This code is slightly complicated by the fact that subsequent observations might be",
"# from different episodes. We ensure that an experience never spans multiple episodes.",
"# This is probably not that important in practice but it seems cleaner.",
"state",
"=",
"[",
"current_observation",
"]",
"idx",
"=",
"len",
"(",
"self",
".",
"recent_observations",
")",
"-",
"1",
"for",
"offset",
"in",
"range",
"(",
"0",
",",
"self",
".",
"window_length",
"-",
"1",
")",
":",
"current_idx",
"=",
"idx",
"-",
"offset",
"current_terminal",
"=",
"self",
".",
"recent_terminals",
"[",
"current_idx",
"-",
"1",
"]",
"if",
"current_idx",
"-",
"1",
">=",
"0",
"else",
"False",
"if",
"current_idx",
"<",
"0",
"or",
"(",
"not",
"self",
".",
"ignore_episode_boundaries",
"and",
"current_terminal",
")",
":",
"# The previously handled observation was terminal, don't add the current one.",
"# Otherwise we would leak into a different episode.",
"break",
"state",
".",
"insert",
"(",
"0",
",",
"self",
".",
"recent_observations",
"[",
"current_idx",
"]",
")",
"while",
"len",
"(",
"state",
")",
"<",
"self",
".",
"window_length",
":",
"state",
".",
"insert",
"(",
"0",
",",
"zeroed_observation",
"(",
"state",
"[",
"0",
"]",
")",
")",
"return",
"state"
]
| Return list of last observations
# Argument
current_observation (object): Last observation
# Returns
A list of the last observations | [
"Return",
"list",
"of",
"last",
"observations"
]
| python | train | 49.88 |
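
What the window logic guarantees, as a sketch (`memory` is any Memory subclass configured with window_length=4; the observation values are illustrative):

state = memory.get_recent_state(current_observation)
assert len(state) == 4       # always exactly window_length observations
# observations older than the last episode boundary (or before the very first
# observation) are replaced on the left by zeroed_observation(...) padding
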
frnsys/broca | broca/knowledge/util.py | https://github.com/frnsys/broca/blob/7236dcf54edc0a4a54a55eb93be30800910667e7/broca/knowledge/util.py#L18-L26 | def _count(dicts):
"""
Merge a list of dicts, summing their values.
"""
counts = defaultdict(int)
for d in dicts:
for k, v in d.items():
counts[k] += v
return counts | [
"def",
"_count",
"(",
"dicts",
")",
":",
"counts",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"d",
"in",
"dicts",
":",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"counts",
"[",
"k",
"]",
"+=",
"v",
"return",
"counts"
]
| Merge a list of dicts, summing their values. | [
"Merge",
"a",
"list",
"of",
"dicts",
"summing",
"their",
"values",
"."
]
| python | train | 22.333333 |
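
For example:

print(_count([{'cat': 2, 'dog': 1}, {'cat': 3}]))
# defaultdict(<class 'int'>, {'cat': 5, 'dog': 1})
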
PmagPy/PmagPy | pmagpy/ipmag.py | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L7630-L7733 | def iplot_hys(fignum, B, M, s):
"""
function to plot hysteresis data
This function has been adapted from pmagplotlib.iplot_hys for specific use
within a Jupyter notebook.
Parameters
-----------
fignum : reference number for matplotlib figure being created
B : list of B (flux density) values of hysteresis experiment
M : list of M (magnetization) values of hysteresis experiment
s : specimen name
"""
if fignum != 0:
plt.figure(num=fignum)
plt.clf()
hpars = {}
# close up loop
Npts = len(M)
B70 = 0.7 * B[0] # 70 percent of maximum field
for b in B:
if b < B70:
break
Nint = B.index(b) - 1
if Nint > 30:
Nint = 30
if Nint < 10:
Nint = 10
Bzero, Mzero, Mfix, Mnorm, Madj, MadjN = "", "", [], [], [], []
Mazero = ""
m_init = 0.5 * (M[0] + M[1])
m_fin = 0.5 * (M[-1] + M[-2])
diff = m_fin - m_init
Bmin = 0.
for k in range(Npts):
frac = old_div(float(k), float(Npts - 1))
Mfix.append((M[k] - diff * frac))
if Bzero == "" and B[k] < 0:
Bzero = k
if B[k] < Bmin:
Bmin = B[k]
kmin = k
# adjust slope with first 30 data points (throwing out first 3)
Bslop = B[2:Nint + 2]
Mslop = Mfix[2:Nint + 2]
polyU = polyfit(Bslop, Mslop, 1) # best fit line to high field points
# adjust slope with first 30 points of ascending branch
Bslop = B[kmin:kmin + (Nint + 1)]
Mslop = Mfix[kmin:kmin + (Nint + 1)]
polyL = polyfit(Bslop, Mslop, 1) # best fit line to high field points
xhf = 0.5 * (polyU[0] + polyL[0]) # mean of two slopes
# convert B to A/m, high field slope in m^3
hpars['hysteresis_xhf'] = '%8.2e' % (xhf * 4 * np.pi * 1e-7)
meanint = 0.5 * (polyU[1] + polyL[1]) # mean of two intercepts
Msat = 0.5 * (polyU[1] - polyL[1]) # mean of saturation remanence
Moff = []
for k in range(Npts):
# take out linear slope and offset (makes symmetric about origin)
Moff.append((Mfix[k] - xhf * B[k] - meanint))
if Mzero == "" and Moff[k] < 0:
Mzero = k
if Mzero != "" and Mazero == "" and Moff[k] > 0:
Mazero = k
hpars['hysteresis_ms_moment'] = '%8.3e' % (Msat) # Ms in Am^2
#
# split into upper and lower loops for splining
Mupper, Bupper, Mlower, Blower = [], [], [], []
deltaM, Bdm = [], [] # diff between upper and lower curves at Bdm
for k in range(kmin - 2, 0, -2):
Mupper.append(old_div(Moff[k], Msat))
Bupper.append(B[k])
for k in range(kmin + 2, len(B)-1):
Mlower.append(Moff[k] / Msat)
Blower.append(B[k])
Iupper = spline.Spline(Bupper, Mupper) # get splines for upper up and down
Ilower = spline.Spline(Blower, Mlower) # get splines for lower
for b in np.arange(B[0]): # get range of field values
Mpos = ((Iupper(b) - Ilower(b))) # evaluate on both sides of B
Mneg = ((Iupper(-b) - Ilower(-b)))
Bdm.append(b)
deltaM.append(0.5 * (Mpos + Mneg)) # take average delta M
print('whew')
for k in range(Npts):
MadjN.append(old_div(Moff[k], Msat))
Mnorm.append(old_div(M[k], Msat))
# find Mr : average of two spline fits evaluted at B=0 (times Msat)
Mr = Msat * 0.5 * (Iupper(0.) - Ilower(0.))
hpars['hysteresis_mr_moment'] = '%8.3e' % (Mr)
# find Bc (x intercept), interpolate between two bounding points
Bz = B[Mzero - 1:Mzero + 1]
Mz = Moff[Mzero - 1:Mzero + 1]
Baz = B[Mazero - 1:Mazero + 1]
Maz = Moff[Mazero - 1:Mazero + 1]
try:
poly = polyfit(Bz, Mz, 1) # best fit line through two bounding points
Bc = old_div(-poly[1], poly[0]) # x intercept
# best fit line through two bounding points
poly = polyfit(Baz, Maz, 1)
Bac = old_div(-poly[1], poly[0]) # x intercept
hpars['hysteresis_bc'] = '%8.3e' % (0.5 * (abs(Bc) + abs(Bac)))
except:
hpars['hysteresis_bc'] = '0'
return hpars, deltaM, Bdm, B, Mnorm, MadjN | [
"def",
"iplot_hys",
"(",
"fignum",
",",
"B",
",",
"M",
",",
"s",
")",
":",
"if",
"fignum",
"!=",
"0",
":",
"plt",
".",
"figure",
"(",
"num",
"=",
"fignum",
")",
"plt",
".",
"clf",
"(",
")",
"hpars",
"=",
"{",
"}",
"# close up loop",
"Npts",
"=",
"len",
"(",
"M",
")",
"B70",
"=",
"0.7",
"*",
"B",
"[",
"0",
"]",
"# 70 percent of maximum field",
"for",
"b",
"in",
"B",
":",
"if",
"b",
"<",
"B70",
":",
"break",
"Nint",
"=",
"B",
".",
"index",
"(",
"b",
")",
"-",
"1",
"if",
"Nint",
">",
"30",
":",
"Nint",
"=",
"30",
"if",
"Nint",
"<",
"10",
":",
"Nint",
"=",
"10",
"Bzero",
",",
"Mzero",
",",
"Mfix",
",",
"Mnorm",
",",
"Madj",
",",
"MadjN",
"=",
"\"\"",
",",
"\"\"",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"Mazero",
"=",
"\"\"",
"m_init",
"=",
"0.5",
"*",
"(",
"M",
"[",
"0",
"]",
"+",
"M",
"[",
"1",
"]",
")",
"m_fin",
"=",
"0.5",
"*",
"(",
"M",
"[",
"-",
"1",
"]",
"+",
"M",
"[",
"-",
"2",
"]",
")",
"diff",
"=",
"m_fin",
"-",
"m_init",
"Bmin",
"=",
"0.",
"for",
"k",
"in",
"range",
"(",
"Npts",
")",
":",
"frac",
"=",
"old_div",
"(",
"float",
"(",
"k",
")",
",",
"float",
"(",
"Npts",
"-",
"1",
")",
")",
"Mfix",
".",
"append",
"(",
"(",
"M",
"[",
"k",
"]",
"-",
"diff",
"*",
"frac",
")",
")",
"if",
"Bzero",
"==",
"\"\"",
"and",
"B",
"[",
"k",
"]",
"<",
"0",
":",
"Bzero",
"=",
"k",
"if",
"B",
"[",
"k",
"]",
"<",
"Bmin",
":",
"Bmin",
"=",
"B",
"[",
"k",
"]",
"kmin",
"=",
"k",
"# adjust slope with first 30 data points (throwing out first 3)",
"Bslop",
"=",
"B",
"[",
"2",
":",
"Nint",
"+",
"2",
"]",
"Mslop",
"=",
"Mfix",
"[",
"2",
":",
"Nint",
"+",
"2",
"]",
"polyU",
"=",
"polyfit",
"(",
"Bslop",
",",
"Mslop",
",",
"1",
")",
"# best fit line to high field points",
"# adjust slope with first 30 points of ascending branch",
"Bslop",
"=",
"B",
"[",
"kmin",
":",
"kmin",
"+",
"(",
"Nint",
"+",
"1",
")",
"]",
"Mslop",
"=",
"Mfix",
"[",
"kmin",
":",
"kmin",
"+",
"(",
"Nint",
"+",
"1",
")",
"]",
"polyL",
"=",
"polyfit",
"(",
"Bslop",
",",
"Mslop",
",",
"1",
")",
"# best fit line to high field points",
"xhf",
"=",
"0.5",
"*",
"(",
"polyU",
"[",
"0",
"]",
"+",
"polyL",
"[",
"0",
"]",
")",
"# mean of two slopes",
"# convert B to A/m, high field slope in m^3",
"hpars",
"[",
"'hysteresis_xhf'",
"]",
"=",
"'%8.2e'",
"%",
"(",
"xhf",
"*",
"4",
"*",
"np",
".",
"pi",
"*",
"1e-7",
")",
"meanint",
"=",
"0.5",
"*",
"(",
"polyU",
"[",
"1",
"]",
"+",
"polyL",
"[",
"1",
"]",
")",
"# mean of two intercepts",
"Msat",
"=",
"0.5",
"*",
"(",
"polyU",
"[",
"1",
"]",
"-",
"polyL",
"[",
"1",
"]",
")",
"# mean of saturation remanence",
"Moff",
"=",
"[",
"]",
"for",
"k",
"in",
"range",
"(",
"Npts",
")",
":",
"# take out linear slope and offset (makes symmetric about origin)",
"Moff",
".",
"append",
"(",
"(",
"Mfix",
"[",
"k",
"]",
"-",
"xhf",
"*",
"B",
"[",
"k",
"]",
"-",
"meanint",
")",
")",
"if",
"Mzero",
"==",
"\"\"",
"and",
"Moff",
"[",
"k",
"]",
"<",
"0",
":",
"Mzero",
"=",
"k",
"if",
"Mzero",
"!=",
"\"\"",
"and",
"Mazero",
"==",
"\"\"",
"and",
"Moff",
"[",
"k",
"]",
">",
"0",
":",
"Mazero",
"=",
"k",
"hpars",
"[",
"'hysteresis_ms_moment'",
"]",
"=",
"'%8.3e'",
"%",
"(",
"Msat",
")",
"# Ms in Am^2",
"#",
"# split into upper and lower loops for splining",
"Mupper",
",",
"Bupper",
",",
"Mlower",
",",
"Blower",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"deltaM",
",",
"Bdm",
"=",
"[",
"]",
",",
"[",
"]",
"# diff between upper and lower curves at Bdm",
"for",
"k",
"in",
"range",
"(",
"kmin",
"-",
"2",
",",
"0",
",",
"-",
"2",
")",
":",
"Mupper",
".",
"append",
"(",
"old_div",
"(",
"Moff",
"[",
"k",
"]",
",",
"Msat",
")",
")",
"Bupper",
".",
"append",
"(",
"B",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"range",
"(",
"kmin",
"+",
"2",
",",
"len",
"(",
"B",
")",
"-",
"1",
")",
":",
"Mlower",
".",
"append",
"(",
"Moff",
"[",
"k",
"]",
"/",
"Msat",
")",
"Blower",
".",
"append",
"(",
"B",
"[",
"k",
"]",
")",
"Iupper",
"=",
"spline",
".",
"Spline",
"(",
"Bupper",
",",
"Mupper",
")",
"# get splines for upper up and down",
"Ilower",
"=",
"spline",
".",
"Spline",
"(",
"Blower",
",",
"Mlower",
")",
"# get splines for lower",
"for",
"b",
"in",
"np",
".",
"arange",
"(",
"B",
"[",
"0",
"]",
")",
":",
"# get range of field values",
"Mpos",
"=",
"(",
"(",
"Iupper",
"(",
"b",
")",
"-",
"Ilower",
"(",
"b",
")",
")",
")",
"# evaluate on both sides of B",
"Mneg",
"=",
"(",
"(",
"Iupper",
"(",
"-",
"b",
")",
"-",
"Ilower",
"(",
"-",
"b",
")",
")",
")",
"Bdm",
".",
"append",
"(",
"b",
")",
"deltaM",
".",
"append",
"(",
"0.5",
"*",
"(",
"Mpos",
"+",
"Mneg",
")",
")",
"# take average delta M",
"print",
"(",
"'whew'",
")",
"for",
"k",
"in",
"range",
"(",
"Npts",
")",
":",
"MadjN",
".",
"append",
"(",
"old_div",
"(",
"Moff",
"[",
"k",
"]",
",",
"Msat",
")",
")",
"Mnorm",
".",
"append",
"(",
"old_div",
"(",
"M",
"[",
"k",
"]",
",",
"Msat",
")",
")",
"# find Mr : average of two spline fits evaluted at B=0 (times Msat)",
"Mr",
"=",
"Msat",
"*",
"0.5",
"*",
"(",
"Iupper",
"(",
"0.",
")",
"-",
"Ilower",
"(",
"0.",
")",
")",
"hpars",
"[",
"'hysteresis_mr_moment'",
"]",
"=",
"'%8.3e'",
"%",
"(",
"Mr",
")",
"# find Bc (x intercept), interpolate between two bounding points",
"Bz",
"=",
"B",
"[",
"Mzero",
"-",
"1",
":",
"Mzero",
"+",
"1",
"]",
"Mz",
"=",
"Moff",
"[",
"Mzero",
"-",
"1",
":",
"Mzero",
"+",
"1",
"]",
"Baz",
"=",
"B",
"[",
"Mazero",
"-",
"1",
":",
"Mazero",
"+",
"1",
"]",
"Maz",
"=",
"Moff",
"[",
"Mazero",
"-",
"1",
":",
"Mazero",
"+",
"1",
"]",
"try",
":",
"poly",
"=",
"polyfit",
"(",
"Bz",
",",
"Mz",
",",
"1",
")",
"# best fit line through two bounding points",
"Bc",
"=",
"old_div",
"(",
"-",
"poly",
"[",
"1",
"]",
",",
"poly",
"[",
"0",
"]",
")",
"# x intercept",
"# best fit line through two bounding points",
"poly",
"=",
"polyfit",
"(",
"Baz",
",",
"Maz",
",",
"1",
")",
"Bac",
"=",
"old_div",
"(",
"-",
"poly",
"[",
"1",
"]",
",",
"poly",
"[",
"0",
"]",
")",
"# x intercept",
"hpars",
"[",
"'hysteresis_bc'",
"]",
"=",
"'%8.3e'",
"%",
"(",
"0.5",
"*",
"(",
"abs",
"(",
"Bc",
")",
"+",
"abs",
"(",
"Bac",
")",
")",
")",
"except",
":",
"hpars",
"[",
"'hysteresis_bc'",
"]",
"=",
"'0'",
"return",
"hpars",
",",
"deltaM",
",",
"Bdm",
",",
"B",
",",
"Mnorm",
",",
"MadjN"
]
| function to plot hysteresis data
This function has been adapted from pmagplotlib.iplot_hys for specific use
within a Jupyter notebook.
Parameters
-----------
fignum : reference number for matplotlib figure being created
B : list of B (flux density) values of hysteresis experiment
M : list of M (magnetization) values of hysteresis experiment
s : specimen name | [
"function",
"to",
"plot",
"hysteresis",
"data"
]
| python | train | 38.096154 |
caseyjlaw/rtpipe | rtpipe/nbpipeline.py | https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/nbpipeline.py#L64-L73 | def setText(self, label, default='', description='Set Text', format='text'):
""" Set text in a notebook pipeline (via interaction or with nbconvert) """
obj = self.load(label)
if obj == None:
obj=default
self.save(obj, label) # initialize with default
textw = Text(value=obj, description=description)
hndl = interact(self.save, obj=textw, label=fixed(label), format=fixed(format)) | [
"def",
"setText",
"(",
"self",
",",
"label",
",",
"default",
"=",
"''",
",",
"description",
"=",
"'Set Text'",
",",
"format",
"=",
"'text'",
")",
":",
"obj",
"=",
"self",
".",
"load",
"(",
"label",
")",
"if",
"obj",
"==",
"None",
":",
"obj",
"=",
"default",
"self",
".",
"save",
"(",
"obj",
",",
"label",
")",
"# initialize with default",
"textw",
"=",
"Text",
"(",
"value",
"=",
"obj",
",",
"description",
"=",
"description",
")",
"hndl",
"=",
"interact",
"(",
"self",
".",
"save",
",",
"obj",
"=",
"textw",
",",
"label",
"=",
"fixed",
"(",
"label",
")",
",",
"format",
"=",
"fixed",
"(",
"format",
")",
")"
]
| Set text in a notebook pipeline (via interaction or with nbconvert) | [
"Set",
"text",
"in",
"a",
"notebook",
"pipeline",
"(",
"via",
"interaction",
"or",
"with",
"nbconvert",
")"
]
| python | train | 43.8 |
scopus-api/scopus | scopus/author_retrieval.py | https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/author_retrieval.py#L85-L88 | def historical_identifier(self):
"""Scopus IDs of previous profiles now compromising this profile."""
hist = chained_get(self._json, ["coredata", 'historical-identifier'], [])
return [d['$'].split(":")[-1] for d in hist] or None | [
"def",
"historical_identifier",
"(",
"self",
")",
":",
"hist",
"=",
"chained_get",
"(",
"self",
".",
"_json",
",",
"[",
"\"coredata\"",
",",
"'historical-identifier'",
"]",
",",
"[",
"]",
")",
"return",
"[",
"d",
"[",
"'$'",
"]",
".",
"split",
"(",
"\":\"",
")",
"[",
"-",
"1",
"]",
"for",
"d",
"in",
"hist",
"]",
"or",
"None"
]
| Scopus IDs of previous profiles now comprising this profile. | [
    "Scopus",
    "IDs",
    "of",
    "previous",
    "profiles",
    "now",
    "comprising",
"this",
"profile",
"."
]
| python | train | 62.25 |
juju/charm-helpers | charmhelpers/contrib/amulet/utils.py | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L730-L761 | def port_knock_tcp(self, host="localhost", port=22, timeout=15):
"""Open a TCP socket to check for a listening sevice on a host.
:param host: host name or IP address, default to localhost
:param port: TCP port number, default to 22
:param timeout: Connect timeout, default to 15 seconds
:returns: True if successful, False if connect failed
"""
# Resolve host name if possible
try:
connect_host = socket.gethostbyname(host)
host_human = "{} ({})".format(connect_host, host)
except socket.error as e:
self.log.warn('Unable to resolve address: '
'{} ({}) Trying anyway!'.format(host, e))
connect_host = host
host_human = connect_host
# Attempt socket connection
try:
knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
knock.settimeout(timeout)
knock.connect((connect_host, port))
knock.close()
self.log.debug('Socket connect OK for host '
'{} on port {}.'.format(host_human, port))
return True
except socket.error as e:
self.log.debug('Socket connect FAIL for'
' {} port {} ({})'.format(host_human, port, e))
return False | [
"def",
"port_knock_tcp",
"(",
"self",
",",
"host",
"=",
"\"localhost\"",
",",
"port",
"=",
"22",
",",
"timeout",
"=",
"15",
")",
":",
"# Resolve host name if possible",
"try",
":",
"connect_host",
"=",
"socket",
".",
"gethostbyname",
"(",
"host",
")",
"host_human",
"=",
"\"{} ({})\"",
".",
"format",
"(",
"connect_host",
",",
"host",
")",
"except",
"socket",
".",
"error",
"as",
"e",
":",
"self",
".",
"log",
".",
"warn",
"(",
"'Unable to resolve address: '",
"'{} ({}) Trying anyway!'",
".",
"format",
"(",
"host",
",",
"e",
")",
")",
"connect_host",
"=",
"host",
"host_human",
"=",
"connect_host",
"# Attempt socket connection",
"try",
":",
"knock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"knock",
".",
"settimeout",
"(",
"timeout",
")",
"knock",
".",
"connect",
"(",
"(",
"connect_host",
",",
"port",
")",
")",
"knock",
".",
"close",
"(",
")",
"self",
".",
"log",
".",
"debug",
"(",
"'Socket connect OK for host '",
"'{} on port {}.'",
".",
"format",
"(",
"host_human",
",",
"port",
")",
")",
"return",
"True",
"except",
"socket",
".",
"error",
"as",
"e",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Socket connect FAIL for'",
"' {} port {} ({})'",
".",
"format",
"(",
"host_human",
",",
"port",
",",
"e",
")",
")",
"return",
"False"
]
| Open a TCP socket to check for a listening service on a host.
:param host: host name or IP address, default to localhost
:param port: TCP port number, default to 22
:param timeout: Connect timeout, default to 15 seconds
        :returns: True if successful, False if connect failed | [
    "Open",
    "a",
    "TCP",
    "socket",
    "to",
    "check",
    "for",
    "a",
    "listening",
    "service",
"on",
"a",
"host",
"."
]
| python | train | 41.5625 |
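
Hedged usage sketch (`utils` stands for an AmuletUtils-style instance providing this method; host and port are illustrative):

if utils.port_knock_tcp('10.5.0.10', port=5432, timeout=5):
    print('service is listening')
else:
    print('connect failed or timed out')
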
lsst-sqre/lsst-projectmeta-kit | lsstprojectmeta/github/urls.py | https://github.com/lsst-sqre/lsst-projectmeta-kit/blob/ac8d4ff65bb93d8fdeb1b46ae6eb5d7414f1ae14/lsstprojectmeta/github/urls.py#L62-L87 | def make_raw_content_url(repo_slug, git_ref, file_path):
"""Make a raw content (raw.githubusercontent.com) URL to a file.
Parameters
----------
repo_slug : `str` or `RepoSlug`
The repository slug, formatted as either a `str` (``'owner/name'``)
or a `RepoSlug` object (created by `parse_repo_slug_from_url`).
git_ref : `str`
The git ref: a branch name, commit hash, or tag name.
file_path : `str`
The POSIX path of the file in the repository tree.
"""
if isinstance(repo_slug, RepoSlug):
slug_str = repo_slug.full
else:
slug_str = repo_slug
if file_path.startswith('/'):
file_path = file_path.lstrip('/')
template = 'https://raw.githubusercontent.com/{slug}/{git_ref}/{path}'
return template.format(
slug=slug_str,
git_ref=git_ref,
path=file_path) | [
"def",
"make_raw_content_url",
"(",
"repo_slug",
",",
"git_ref",
",",
"file_path",
")",
":",
"if",
"isinstance",
"(",
"repo_slug",
",",
"RepoSlug",
")",
":",
"slug_str",
"=",
"repo_slug",
".",
"full",
"else",
":",
"slug_str",
"=",
"repo_slug",
"if",
"file_path",
".",
"startswith",
"(",
"'/'",
")",
":",
"file_path",
"=",
"file_path",
".",
"lstrip",
"(",
"'/'",
")",
"template",
"=",
"'https://raw.githubusercontent.com/{slug}/{git_ref}/{path}'",
"return",
"template",
".",
"format",
"(",
"slug",
"=",
"slug_str",
",",
"git_ref",
"=",
"git_ref",
",",
"path",
"=",
"file_path",
")"
]
| Make a raw content (raw.githubusercontent.com) URL to a file.
Parameters
----------
repo_slug : `str` or `RepoSlug`
The repository slug, formatted as either a `str` (``'owner/name'``)
or a `RepoSlug` object (created by `parse_repo_slug_from_url`).
git_ref : `str`
The git ref: a branch name, commit hash, or tag name.
file_path : `str`
The POSIX path of the file in the repository tree. | [
"Make",
"a",
"raw",
"content",
"(",
"raw",
".",
"githubusercontent",
".",
"com",
")",
"URL",
"to",
"a",
"file",
"."
]
| python | valid | 32.769231 |
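
For example:

url = make_raw_content_url('lsst-sqre/lsst-projectmeta-kit', 'master',
                           '/README.rst')       # leading slash is stripped
# https://raw.githubusercontent.com/lsst-sqre/lsst-projectmeta-kit/master/README.rst
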
huge-success/sanic | sanic/app.py | https://github.com/huge-success/sanic/blob/6a4a3f617fdbe1d3ee8bdc9d1b12ad2d0b34acdd/sanic/app.py#L1115-L1209 | async def create_server(
self,
host: Optional[str] = None,
port: Optional[int] = None,
debug: bool = False,
ssl: Union[dict, SSLContext, None] = None,
sock: Optional[socket] = None,
protocol: Type[Protocol] = None,
backlog: int = 100,
stop_event: Any = None,
access_log: Optional[bool] = None,
return_asyncio_server=False,
asyncio_server_kwargs=None,
) -> None:
"""
Asynchronous version of :func:`run`.
This method will take care of the operations necessary to invoke
the *before_start* events via :func:`trigger_events` method invocation
before starting the *sanic* app in Async mode.
.. note::
This does not support multiprocessing and is not the preferred
way to run a :class:`Sanic` application.
:param host: Address to host on
:type host: str
:param port: Port to host on
:type port: int
:param debug: Enables debug output (slows server)
:type debug: bool
:param ssl: SSLContext, or location of certificate and key
for SSL encryption of worker(s)
:type ssl:SSLContext or dict
:param sock: Socket for the server to accept connections from
:type sock: socket
:param protocol: Subclass of asyncio Protocol class
:type protocol: type[Protocol]
:param backlog: a number of unaccepted connections that the system
will allow before refusing new connections
:type backlog: int
:param stop_event: event to be triggered
before stopping the app - deprecated
:type stop_event: None
:param access_log: Enables writing access logs (slows server)
:type access_log: bool
:param return_asyncio_server: flag that defines whether there's a need
to return asyncio.Server or
start it serving right away
:type return_asyncio_server: bool
:param asyncio_server_kwargs: key-value arguments for
asyncio/uvloop create_server method
:type asyncio_server_kwargs: dict
:return: Nothing
"""
if sock is None:
host, port = host or "127.0.0.1", port or 8000
if protocol is None:
protocol = (
WebSocketProtocol if self.websocket_enabled else HttpProtocol
)
if stop_event is not None:
if debug:
warnings.simplefilter("default")
warnings.warn(
"stop_event will be removed from future versions.",
DeprecationWarning,
)
# if access_log is passed explicitly change config.ACCESS_LOG
if access_log is not None:
self.config.ACCESS_LOG = access_log
server_settings = self._helper(
host=host,
port=port,
debug=debug,
ssl=ssl,
sock=sock,
loop=get_event_loop(),
protocol=protocol,
backlog=backlog,
run_async=return_asyncio_server,
)
# Trigger before_start events
await self.trigger_events(
server_settings.get("before_start", []),
server_settings.get("loop"),
)
return await serve(
asyncio_server_kwargs=asyncio_server_kwargs, **server_settings
) | [
"async",
"def",
"create_server",
"(",
"self",
",",
"host",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"port",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"debug",
":",
"bool",
"=",
"False",
",",
"ssl",
":",
"Union",
"[",
"dict",
",",
"SSLContext",
",",
"None",
"]",
"=",
"None",
",",
"sock",
":",
"Optional",
"[",
"socket",
"]",
"=",
"None",
",",
"protocol",
":",
"Type",
"[",
"Protocol",
"]",
"=",
"None",
",",
"backlog",
":",
"int",
"=",
"100",
",",
"stop_event",
":",
"Any",
"=",
"None",
",",
"access_log",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
",",
"return_asyncio_server",
"=",
"False",
",",
"asyncio_server_kwargs",
"=",
"None",
",",
")",
"->",
"None",
":",
"if",
"sock",
"is",
"None",
":",
"host",
",",
"port",
"=",
"host",
"or",
"\"127.0.0.1\"",
",",
"port",
"or",
"8000",
"if",
"protocol",
"is",
"None",
":",
"protocol",
"=",
"(",
"WebSocketProtocol",
"if",
"self",
".",
"websocket_enabled",
"else",
"HttpProtocol",
")",
"if",
"stop_event",
"is",
"not",
"None",
":",
"if",
"debug",
":",
"warnings",
".",
"simplefilter",
"(",
"\"default\"",
")",
"warnings",
".",
"warn",
"(",
"\"stop_event will be removed from future versions.\"",
",",
"DeprecationWarning",
",",
")",
"# if access_log is passed explicitly change config.ACCESS_LOG",
"if",
"access_log",
"is",
"not",
"None",
":",
"self",
".",
"config",
".",
"ACCESS_LOG",
"=",
"access_log",
"server_settings",
"=",
"self",
".",
"_helper",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"debug",
"=",
"debug",
",",
"ssl",
"=",
"ssl",
",",
"sock",
"=",
"sock",
",",
"loop",
"=",
"get_event_loop",
"(",
")",
",",
"protocol",
"=",
"protocol",
",",
"backlog",
"=",
"backlog",
",",
"run_async",
"=",
"return_asyncio_server",
",",
")",
"# Trigger before_start events",
"await",
"self",
".",
"trigger_events",
"(",
"server_settings",
".",
"get",
"(",
"\"before_start\"",
",",
"[",
"]",
")",
",",
"server_settings",
".",
"get",
"(",
"\"loop\"",
")",
",",
")",
"return",
"await",
"serve",
"(",
"asyncio_server_kwargs",
"=",
"asyncio_server_kwargs",
",",
"*",
"*",
"server_settings",
")"
]
| Asynchronous version of :func:`run`.
This method will take care of the operations necessary to invoke
the *before_start* events via :func:`trigger_events` method invocation
before starting the *sanic* app in Async mode.
.. note::
This does not support multiprocessing and is not the preferred
way to run a :class:`Sanic` application.
:param host: Address to host on
:type host: str
:param port: Port to host on
:type port: int
:param debug: Enables debug output (slows server)
:type debug: bool
:param ssl: SSLContext, or location of certificate and key
for SSL encryption of worker(s)
:type ssl:SSLContext or dict
:param sock: Socket for the server to accept connections from
:type sock: socket
:param protocol: Subclass of asyncio Protocol class
:type protocol: type[Protocol]
:param backlog: a number of unaccepted connections that the system
will allow before refusing new connections
:type backlog: int
:param stop_event: event to be triggered
before stopping the app - deprecated
:type stop_event: None
:param access_log: Enables writing access logs (slows server)
:type access_log: bool
:param return_asyncio_server: flag that defines whether there's a need
to return asyncio.Server or
start it serving right away
:type return_asyncio_server: bool
:param asyncio_server_kwargs: key-value arguments for
asyncio/uvloop create_server method
:type asyncio_server_kwargs: dict
:return: Nothing | [
"Asynchronous",
"version",
"of",
":",
"func",
":",
"run",
"."
]
| python | train | 36.021053 |
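
A minimal async-mode sketch for the method above, assuming a Sanic release contemporary with this signature (the route and event-loop wiring are illustrative; app.run() remains the preferred entry point):

import asyncio
from sanic import Sanic
from sanic.response import text

app = Sanic(__name__)

@app.route('/')
async def index(request):
    return text('ok')

loop = asyncio.get_event_loop()
asyncio.ensure_future(app.create_server(host='0.0.0.0', port=8000,
                                        return_asyncio_server=True))
loop.run_forever()
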
tensorflow/tensor2tensor | tensor2tensor/utils/hparam.py | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/hparam.py#L470-L480 | def del_hparam(self, name):
"""Removes the hyperparameter with key 'name'.
Does nothing if it isn't present.
Args:
name: Name of the hyperparameter.
"""
if hasattr(self, name):
delattr(self, name)
del self._hparam_types[name] | [
"def",
"del_hparam",
"(",
"self",
",",
"name",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"name",
")",
":",
"delattr",
"(",
"self",
",",
"name",
")",
"del",
"self",
".",
"_hparam_types",
"[",
"name",
"]"
]
| Removes the hyperparameter with key 'name'.
Does nothing if it isn't present.
Args:
name: Name of the hyperparameter. | [
"Removes",
"the",
"hyperparameter",
"with",
"key",
"name",
"."
]
| python | train | 23.181818 |
nickpandolfi/Cyther | cyther/launcher.py | https://github.com/nickpandolfi/Cyther/blob/9fb0bd77af594008aa6ee8af460aa8c953abf5bc/cyther/launcher.py#L75-L81 | def _get_encodings():
"""
Just a simple function to return the system encoding (defaults to utf-8)
"""
stdout_encoding = sys.stdout.encoding if sys.stdout.encoding else 'utf-8'
stderr_encoding = sys.stderr.encoding if sys.stderr.encoding else 'utf-8'
return stdout_encoding, stderr_encoding | [
"def",
"_get_encodings",
"(",
")",
":",
"stdout_encoding",
"=",
"sys",
".",
"stdout",
".",
"encoding",
"if",
"sys",
".",
"stdout",
".",
"encoding",
"else",
"'utf-8'",
"stderr_encoding",
"=",
"sys",
".",
"stderr",
".",
"encoding",
"if",
"sys",
".",
"stderr",
".",
"encoding",
"else",
"'utf-8'",
"return",
"stdout_encoding",
",",
"stderr_encoding"
]
| Just a simple function to return the system encoding (defaults to utf-8) | [
"Just",
"a",
"simple",
"function",
"to",
"return",
"the",
"system",
"encoding",
"(",
"defaults",
"to",
"utf",
"-",
"8",
")"
]
| python | train | 44 |
mlperf/training | translation/tensorflow/transformer/utils/tokenizer.py | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/tokenizer.py#L87-L126 | def init_from_files(
vocab_file, files, target_vocab_size, threshold, min_count=None,
file_byte_limit=1e6, reserved_tokens=None):
"""Create subtoken vocabulary based on files, and save vocab to file.
Args:
vocab_file: String name of vocab file to store subtoken vocabulary.
files: List of file paths that will be used to generate vocabulary.
target_vocab_size: target vocabulary size to generate.
threshold: int threshold of vocabulary size to accept.
min_count: int minimum count to use for generating the vocabulary. The min
count is the minimum number of times a subtoken should appear in the
files before it is added to the vocabulary. If set to none, this value
is found using binary search.
file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that
will be drawn from the files.
reserved_tokens: List of string tokens that are guaranteed to be at the
beginning of the subtoken vocabulary list.
Returns:
Subtokenizer object
"""
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
if tf.gfile.Exists(vocab_file):
tf.logging.info("Vocab file already exists (%s)" % vocab_file)
else:
tf.logging.info("Begin steps to create subtoken vocabulary...")
token_counts = _count_tokens(files, file_byte_limit)
alphabet = _generate_alphabet_dict(token_counts)
subtoken_list = _generate_subtokens_with_target_vocab_size(
token_counts, alphabet, target_vocab_size, threshold, min_count,
reserved_tokens)
tf.logging.info("Generated vocabulary with %d subtokens." %
len(subtoken_list))
mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE,
value=len(subtoken_list))
_save_vocab_file(vocab_file, subtoken_list)
return Subtokenizer(vocab_file) | [
"def",
"init_from_files",
"(",
"vocab_file",
",",
"files",
",",
"target_vocab_size",
",",
"threshold",
",",
"min_count",
"=",
"None",
",",
"file_byte_limit",
"=",
"1e6",
",",
"reserved_tokens",
"=",
"None",
")",
":",
"if",
"reserved_tokens",
"is",
"None",
":",
"reserved_tokens",
"=",
"RESERVED_TOKENS",
"if",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"vocab_file",
")",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Vocab file already exists (%s)\"",
"%",
"vocab_file",
")",
"else",
":",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Begin steps to create subtoken vocabulary...\"",
")",
"token_counts",
"=",
"_count_tokens",
"(",
"files",
",",
"file_byte_limit",
")",
"alphabet",
"=",
"_generate_alphabet_dict",
"(",
"token_counts",
")",
"subtoken_list",
"=",
"_generate_subtokens_with_target_vocab_size",
"(",
"token_counts",
",",
"alphabet",
",",
"target_vocab_size",
",",
"threshold",
",",
"min_count",
",",
"reserved_tokens",
")",
"tf",
".",
"logging",
".",
"info",
"(",
"\"Generated vocabulary with %d subtokens.\"",
"%",
"len",
"(",
"subtoken_list",
")",
")",
"mlperf_log",
".",
"transformer_print",
"(",
"key",
"=",
"mlperf_log",
".",
"PREPROC_VOCAB_SIZE",
",",
"value",
"=",
"len",
"(",
"subtoken_list",
")",
")",
"_save_vocab_file",
"(",
"vocab_file",
",",
"subtoken_list",
")",
"return",
"Subtokenizer",
"(",
"vocab_file",
")"
]
| Create subtoken vocabulary based on files, and save vocab to file.
Args:
vocab_file: String name of vocab file to store subtoken vocabulary.
files: List of file paths that will be used to generate vocabulary.
target_vocab_size: target vocabulary size to generate.
threshold: int threshold of vocabulary size to accept.
min_count: int minimum count to use for generating the vocabulary. The min
count is the minimum number of times a subtoken should appear in the
files before it is added to the vocabulary. If set to none, this value
is found using binary search.
file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that
will be drawn from the files.
reserved_tokens: List of string tokens that are guaranteed to be at the
beginning of the subtoken vocabulary list.
Returns:
Subtokenizer object | [
"Create",
"subtoken",
"vocabulary",
"based",
"on",
"files",
"and",
"save",
"vocab",
"to",
"file",
"."
]
| python | train | 47.325 |
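
Hedged usage sketch (assuming init_from_files is exposed as a static constructor on Subtokenizer, as the return statement suggests; file names are illustrative):

subtokenizer = Subtokenizer.init_from_files(
    vocab_file='vocab.ende.32768',
    files=['train.en', 'train.de'],     # raw text corpora to sample from
    target_vocab_size=32768,
    threshold=327)                      # acceptable slack around the target size
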
matthew-brett/delocate | delocate/delocating.py | https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/delocating.py#L292-L300 | def _merge_lib_dict(d1, d2):
""" Merges lib_dict `d2` into lib_dict `d1`
"""
for required, requirings in d2.items():
if required in d1:
d1[required].update(requirings)
else:
d1[required] = requirings
return None | [
"def",
"_merge_lib_dict",
"(",
"d1",
",",
"d2",
")",
":",
"for",
"required",
",",
"requirings",
"in",
"d2",
".",
"items",
"(",
")",
":",
"if",
"required",
"in",
"d1",
":",
"d1",
"[",
"required",
"]",
".",
"update",
"(",
"requirings",
")",
"else",
":",
"d1",
"[",
"required",
"]",
"=",
"requirings",
"return",
"None"
]
| Merges lib_dict `d2` into lib_dict `d1` | [
"Merges",
"lib_dict",
"d2",
"into",
"lib_dict",
"d1"
]
| python | train | 28.777778 |
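
For example:

d1 = {'libA.dylib': {'bin/x'}}
d2 = {'libA.dylib': {'bin/y'}, 'libB.dylib': {'bin/z'}}
_merge_lib_dict(d1, d2)      # mutates d1 in place; returns None
print(d1)   # {'libA.dylib': {'bin/x', 'bin/y'}, 'libB.dylib': {'bin/z'}} (set order may vary)
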
dask/dask-ml | dask_ml/model_selection/_search.py | https://github.com/dask/dask-ml/blob/cc4837c2c2101f9302cac38354b55754263cd1f3/dask_ml/model_selection/_search.py#L1605-L1609 | def _get_param_iterator(self):
"""Return ParameterSampler instance for the given distributions"""
return model_selection.ParameterSampler(
self.param_distributions, self.n_iter, random_state=self.random_state
) | [
"def",
"_get_param_iterator",
"(",
"self",
")",
":",
"return",
"model_selection",
".",
"ParameterSampler",
"(",
"self",
".",
"param_distributions",
",",
"self",
".",
"n_iter",
",",
"random_state",
"=",
"self",
".",
"random_state",
")"
]
| Return ParameterSampler instance for the given distributions | [
"Return",
"ParameterSampler",
"instance",
"for",
"the",
"given",
"distributions"
]
| python | train | 48.4 |
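Since the method above simply delegates to scikit-learn, the sampling it performs can be reproduced directly with the public ParameterSampler API; the distribution below is illustrative:

from scipy.stats import uniform
from sklearn import model_selection

# Draw five random candidates from the given distributions, exactly as
# dask-ml's randomized search does when it builds its candidate list.
sampler = model_selection.ParameterSampler(
    {'alpha': uniform(0, 1)}, n_iter=5, random_state=0
)
for params in sampler:
    print(params)   # e.g. {'alpha': 0.5488...}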
mixmastamyk/console | console/utils.py | https://github.com/mixmastamyk/console/blob/afe6c95d5a7b83d85376f450454e3769e4a5c3d0/console/utils.py#L88-L100 | def reset_terminal():
''' Reset the terminal/console screen. (Also aliased to cls.)
Greater than a fullscreen terminal clear, also clears the scrollback
buffer. May expose bugs in dumb terminals.
'''
    text = None  # avoid NameError on Windows, where no sequence is returned
    if os.name == 'nt':
from .windows import cls
cls()
else:
text = sc.reset
_write(text)
return text | [
"def",
"reset_terminal",
"(",
")",
":",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"from",
".",
"windows",
"import",
"cls",
"cls",
"(",
")",
"else",
":",
"text",
"=",
"sc",
".",
"reset",
"_write",
"(",
"text",
")",
"return",
"text"
]
| Reset the terminal/console screen. (Also aliased to cls.)
Greater than a fullscreen terminal clear, also clears the scrollback
buffer. May expose bugs in dumb terminals. | [
"Reset",
"the",
"terminal",
"/",
"console",
"screen",
".",
"(",
"Also",
"aliased",
"to",
"cls",
".",
")"
]
| python | train | 27.615385 |
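A minimal usage sketch, assuming the `console` package is installed; as written above, only the POSIX branch produces and returns the reset sequence (`sc.reset`), which is why the fix initializing `text` matters on Windows:

from console.utils import reset_terminal

# Clears the screen *and* the scrollback buffer, unlike a plain
# fullscreen clear; returns the emitted escape sequence on POSIX.
sequence = reset_terminal()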
spyder-ide/spyder | spyder/plugins/editor/widgets/editor.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/editor.py#L597-L726 | def create_shortcuts(self):
"""Create local shortcuts"""
# --- Configurable shortcuts
inspect = config_shortcut(self.inspect_current_object, context='Editor',
name='Inspect current object', parent=self)
set_breakpoint = config_shortcut(self.set_or_clear_breakpoint,
context='Editor', name='Breakpoint',
parent=self)
set_cond_breakpoint = config_shortcut(
self.set_or_edit_conditional_breakpoint,
context='Editor',
name='Conditional breakpoint',
parent=self)
gotoline = config_shortcut(self.go_to_line, context='Editor',
name='Go to line', parent=self)
tab = config_shortcut(lambda: self.tab_navigation_mru(forward=False),
context='Editor',
name='Go to previous file', parent=self)
tabshift = config_shortcut(self.tab_navigation_mru, context='Editor',
name='Go to next file', parent=self)
prevtab = config_shortcut(lambda: self.tabs.tab_navigate(-1),
context='Editor',
name='Cycle to previous file', parent=self)
nexttab = config_shortcut(lambda: self.tabs.tab_navigate(1),
context='Editor',
name='Cycle to next file', parent=self)
run_selection = config_shortcut(self.run_selection, context='Editor',
name='Run selection', parent=self)
new_file = config_shortcut(lambda : self.sig_new_file[()].emit(),
context='Editor', name='New file',
parent=self)
open_file = config_shortcut(lambda : self.plugin_load[()].emit(),
context='Editor', name='Open file',
parent=self)
save_file = config_shortcut(self.save, context='Editor',
name='Save file', parent=self)
save_all = config_shortcut(self.save_all, context='Editor',
name='Save all', parent=self)
save_as = config_shortcut(lambda : self.sig_save_as.emit(),
context='Editor', name='Save As',
parent=self)
close_all = config_shortcut(self.close_all_files, context='Editor',
name='Close all', parent=self)
prev_edit_pos = config_shortcut(lambda : self.sig_prev_edit_pos.emit(),
context="Editor",
name="Last edit location",
parent=self)
prev_cursor = config_shortcut(lambda : self.sig_prev_cursor.emit(),
context="Editor",
name="Previous cursor position",
parent=self)
next_cursor = config_shortcut(lambda : self.sig_next_cursor.emit(),
context="Editor",
name="Next cursor position",
parent=self)
zoom_in_1 = config_shortcut(lambda : self.zoom_in.emit(),
context="Editor",
name="zoom in 1",
parent=self)
zoom_in_2 = config_shortcut(lambda : self.zoom_in.emit(),
context="Editor",
name="zoom in 2",
parent=self)
zoom_out = config_shortcut(lambda : self.zoom_out.emit(),
context="Editor",
name="zoom out",
parent=self)
zoom_reset = config_shortcut(lambda: self.zoom_reset.emit(),
context="Editor",
name="zoom reset",
parent=self)
close_file_1 = config_shortcut(self.close_file,
context="Editor",
name="close file 1",
parent=self)
close_file_2 = config_shortcut(self.close_file,
context="Editor",
name="close file 2",
parent=self)
run_cell = config_shortcut(self.run_cell,
context="Editor",
name="run cell",
parent=self)
run_cell_and_advance = config_shortcut(self.run_cell_and_advance,
context="Editor",
name="run cell and advance",
parent=self)
go_to_next_cell = config_shortcut(self.advance_cell,
context="Editor",
name="go to next cell",
parent=self)
go_to_previous_cell = config_shortcut(lambda: self.advance_cell(reverse=True),
context="Editor",
name="go to previous cell",
parent=self)
re_run_last_cell = config_shortcut(self.re_run_last_cell,
context="Editor",
name="re-run last cell",
parent=self)
prev_warning = config_shortcut(lambda: self.sig_prev_warning.emit(),
context="Editor",
name="Previous warning",
parent=self)
next_warning = config_shortcut(lambda: self.sig_next_warning.emit(),
context="Editor",
name="Next warning",
parent=self)
split_vertically = config_shortcut(lambda: self.sig_split_vertically.emit(),
context="Editor",
name="split vertically",
parent=self)
split_horizontally = config_shortcut(lambda: self.sig_split_horizontally.emit(),
context="Editor",
name="split horizontally",
parent=self)
close_split = config_shortcut(self.close_split,
context="Editor",
name="close split panel",
parent=self)
# Return configurable ones
return [inspect, set_breakpoint, set_cond_breakpoint, gotoline, tab,
tabshift, run_selection, new_file, open_file, save_file,
save_all, save_as, close_all, prev_edit_pos, prev_cursor,
next_cursor, zoom_in_1, zoom_in_2, zoom_out, zoom_reset,
close_file_1, close_file_2, run_cell, run_cell_and_advance,
go_to_next_cell, go_to_previous_cell, re_run_last_cell,
prev_warning, next_warning, split_vertically,
split_horizontally, close_split,
prevtab, nexttab] | [
"def",
"create_shortcuts",
"(",
"self",
")",
":",
"# --- Configurable shortcuts\r",
"inspect",
"=",
"config_shortcut",
"(",
"self",
".",
"inspect_current_object",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Inspect current object'",
",",
"parent",
"=",
"self",
")",
"set_breakpoint",
"=",
"config_shortcut",
"(",
"self",
".",
"set_or_clear_breakpoint",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Breakpoint'",
",",
"parent",
"=",
"self",
")",
"set_cond_breakpoint",
"=",
"config_shortcut",
"(",
"self",
".",
"set_or_edit_conditional_breakpoint",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Conditional breakpoint'",
",",
"parent",
"=",
"self",
")",
"gotoline",
"=",
"config_shortcut",
"(",
"self",
".",
"go_to_line",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Go to line'",
",",
"parent",
"=",
"self",
")",
"tab",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"tab_navigation_mru",
"(",
"forward",
"=",
"False",
")",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Go to previous file'",
",",
"parent",
"=",
"self",
")",
"tabshift",
"=",
"config_shortcut",
"(",
"self",
".",
"tab_navigation_mru",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Go to next file'",
",",
"parent",
"=",
"self",
")",
"prevtab",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"tabs",
".",
"tab_navigate",
"(",
"-",
"1",
")",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Cycle to previous file'",
",",
"parent",
"=",
"self",
")",
"nexttab",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"tabs",
".",
"tab_navigate",
"(",
"1",
")",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Cycle to next file'",
",",
"parent",
"=",
"self",
")",
"run_selection",
"=",
"config_shortcut",
"(",
"self",
".",
"run_selection",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Run selection'",
",",
"parent",
"=",
"self",
")",
"new_file",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"sig_new_file",
"[",
"(",
")",
"]",
".",
"emit",
"(",
")",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'New file'",
",",
"parent",
"=",
"self",
")",
"open_file",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"plugin_load",
"[",
"(",
")",
"]",
".",
"emit",
"(",
")",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Open file'",
",",
"parent",
"=",
"self",
")",
"save_file",
"=",
"config_shortcut",
"(",
"self",
".",
"save",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Save file'",
",",
"parent",
"=",
"self",
")",
"save_all",
"=",
"config_shortcut",
"(",
"self",
".",
"save_all",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Save all'",
",",
"parent",
"=",
"self",
")",
"save_as",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"sig_save_as",
".",
"emit",
"(",
")",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Save As'",
",",
"parent",
"=",
"self",
")",
"close_all",
"=",
"config_shortcut",
"(",
"self",
".",
"close_all_files",
",",
"context",
"=",
"'Editor'",
",",
"name",
"=",
"'Close all'",
",",
"parent",
"=",
"self",
")",
"prev_edit_pos",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"sig_prev_edit_pos",
".",
"emit",
"(",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"Last edit location\"",
",",
"parent",
"=",
"self",
")",
"prev_cursor",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"sig_prev_cursor",
".",
"emit",
"(",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"Previous cursor position\"",
",",
"parent",
"=",
"self",
")",
"next_cursor",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"sig_next_cursor",
".",
"emit",
"(",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"Next cursor position\"",
",",
"parent",
"=",
"self",
")",
"zoom_in_1",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"zoom_in",
".",
"emit",
"(",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"zoom in 1\"",
",",
"parent",
"=",
"self",
")",
"zoom_in_2",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"zoom_in",
".",
"emit",
"(",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"zoom in 2\"",
",",
"parent",
"=",
"self",
")",
"zoom_out",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"zoom_out",
".",
"emit",
"(",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"zoom out\"",
",",
"parent",
"=",
"self",
")",
"zoom_reset",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"zoom_reset",
".",
"emit",
"(",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"zoom reset\"",
",",
"parent",
"=",
"self",
")",
"close_file_1",
"=",
"config_shortcut",
"(",
"self",
".",
"close_file",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"close file 1\"",
",",
"parent",
"=",
"self",
")",
"close_file_2",
"=",
"config_shortcut",
"(",
"self",
".",
"close_file",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"close file 2\"",
",",
"parent",
"=",
"self",
")",
"run_cell",
"=",
"config_shortcut",
"(",
"self",
".",
"run_cell",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"run cell\"",
",",
"parent",
"=",
"self",
")",
"run_cell_and_advance",
"=",
"config_shortcut",
"(",
"self",
".",
"run_cell_and_advance",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"run cell and advance\"",
",",
"parent",
"=",
"self",
")",
"go_to_next_cell",
"=",
"config_shortcut",
"(",
"self",
".",
"advance_cell",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"go to next cell\"",
",",
"parent",
"=",
"self",
")",
"go_to_previous_cell",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"advance_cell",
"(",
"reverse",
"=",
"True",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"go to previous cell\"",
",",
"parent",
"=",
"self",
")",
"re_run_last_cell",
"=",
"config_shortcut",
"(",
"self",
".",
"re_run_last_cell",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"re-run last cell\"",
",",
"parent",
"=",
"self",
")",
"prev_warning",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"sig_prev_warning",
".",
"emit",
"(",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"Previous warning\"",
",",
"parent",
"=",
"self",
")",
"next_warning",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"sig_next_warning",
".",
"emit",
"(",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"Next warning\"",
",",
"parent",
"=",
"self",
")",
"split_vertically",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"sig_split_vertically",
".",
"emit",
"(",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"split vertically\"",
",",
"parent",
"=",
"self",
")",
"split_horizontally",
"=",
"config_shortcut",
"(",
"lambda",
":",
"self",
".",
"sig_split_horizontally",
".",
"emit",
"(",
")",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"split horizontally\"",
",",
"parent",
"=",
"self",
")",
"close_split",
"=",
"config_shortcut",
"(",
"self",
".",
"close_split",
",",
"context",
"=",
"\"Editor\"",
",",
"name",
"=",
"\"close split panel\"",
",",
"parent",
"=",
"self",
")",
"# Return configurable ones\r",
"return",
"[",
"inspect",
",",
"set_breakpoint",
",",
"set_cond_breakpoint",
",",
"gotoline",
",",
"tab",
",",
"tabshift",
",",
"run_selection",
",",
"new_file",
",",
"open_file",
",",
"save_file",
",",
"save_all",
",",
"save_as",
",",
"close_all",
",",
"prev_edit_pos",
",",
"prev_cursor",
",",
"next_cursor",
",",
"zoom_in_1",
",",
"zoom_in_2",
",",
"zoom_out",
",",
"zoom_reset",
",",
"close_file_1",
",",
"close_file_2",
",",
"run_cell",
",",
"run_cell_and_advance",
",",
"go_to_next_cell",
",",
"go_to_previous_cell",
",",
"re_run_last_cell",
",",
"prev_warning",
",",
"next_warning",
",",
"split_vertically",
",",
"split_horizontally",
",",
"close_split",
",",
"prevtab",
",",
"nexttab",
"]"
]
| Create local shortcuts | [
"Create",
"local",
"shortcuts"
]
| python | train | 61.653846 |
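The shortcut table above repeatedly binds callables to named, user-configurable key sequences; the most-recently-used ("Go to previous/next file") behavior behind two of those bindings can be sketched in self-contained Python (the names here are illustrative, not Spyder internals):

from collections import deque

class MRUFiles:
    """Most-recently-used ordering over open files."""
    def __init__(self):
        self._stack = deque()

    def visit(self, name):
        # Move the visited file to the front of the MRU order.
        if name in self._stack:
            self._stack.remove(name)
        self._stack.appendleft(name)

    def previous(self):
        # The file used most recently before the current one.
        return self._stack[1] if len(self._stack) > 1 else None

mru = MRUFiles()
for f in ('a.py', 'b.py', 'c.py'):
    mru.visit(f)
print(mru.previous())   # -> 'b.py'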