repo (string, 7-54 chars) | path (string, 4-192 chars) | url (string, 87-284 chars) | code (string, 78-104k chars) | code_tokens (list) | docstring (string, 1-46.9k chars) | docstring_tokens (list) | language (1 class: python) | partition (3 classes)
---|---|---|---|---|---|---|---|---
lingthio/Flask-User
|
example_apps/pynamodb_app.py
|
https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/example_apps/pynamodb_app.py#L37-L122
|
def create_app():
    """ Flask application factory """

    # Setup Flask app and app.config
    app = Flask(__name__)
    app.config.from_object(__name__ + '.ConfigClass')

    # Initialize Flask extensions
    db = None
    mail = Mail(app)  # Initialize Flask-Mail

    # Define the User data model. Make sure to add flask_user UserMixin !!!
    class UsernameIndex(GlobalSecondaryIndex):
        class Meta:
            read_capacity_units = 1
            write_capacity_units = 1
            projection = AllProjection()
        username = UnicodeAttribute(hash_key=True)

    class EmailIndex(GlobalSecondaryIndex):
        class Meta:
            read_capacity_units = 1
            write_capacity_units = 1
            projection = AllProjection()
        email = UnicodeAttribute(hash_key=True)

    class User(Model, UserMixin):
        class Meta:
            table_name = 'users'
        id = UnicodeAttribute(hash_key=True, default=lambda: str(uuid.uuid1()))
        active = BooleanAttribute()

        # User authentication information
        username = UnicodeAttribute(null=True)
        password = UnicodeAttribute(null=True)
        username_index = UsernameIndex()

        # User email information
        email = UnicodeAttribute(null=True)
        email_confirmed_at = UTCDateTimeAttribute(null=True)
        email_index = EmailIndex()

        # User information
        first_name = UnicodeAttribute(null=True)
        last_name = UnicodeAttribute(null=True)

    # Setup Flask-User
    user_manager = UserManager(app, db, User)

    # Create all database tables
    print('create_schema()')
    user_manager.db_manager.create_all_tables()
    print('created_schema()')

    # The Home page is accessible to anyone
    @app.route('/')
    def home_page():
        return render_template_string("""
            {% block content %}
                <h2>Home page</h2>
                <p>This page can be accessed by anyone.</p><br/>
                <p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
                <p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
            {% endblock %}
            """)

    # The Members page is only accessible to authenticated users
    @app.route('/members')
    @login_required  # Use of @login_required decorator
    def members_page():
        return render_template_string("""
            {% block content %}
                <h2>Members page</h2>
                <p>This page can only be accessed by authenticated users.</p><br/>
                <p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
                <p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
            {% endblock %}
            """)

    return app
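
# Editor's addition: a hedged usage sketch, not part of the original example.
# It assumes the module-level imports and ConfigClass that Flask-User's
# example_apps/pynamodb_app.py defines elsewhere in this file.
if __name__ == '__main__':
    app = create_app()
    app.run(host='0.0.0.0', port=5000, debug=True)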
|
[
"def",
"create_app",
"(",
")",
":",
"# Setup Flask app and app.config",
"app",
"=",
"Flask",
"(",
"__name__",
")",
"app",
".",
"config",
".",
"from_object",
"(",
"__name__",
"+",
"'.ConfigClass'",
")",
"# Initialize Flask extensions",
"db",
"=",
"None",
"mail",
"=",
"Mail",
"(",
"app",
")",
"# Initialize Flask-Mail",
"# Define the User data model. Make sure to add flask_user UserMixin !!!",
"class",
"UsernameIndex",
"(",
"GlobalSecondaryIndex",
")",
":",
"class",
"Meta",
":",
"read_capacity_units",
"=",
"1",
"write_capacity_units",
"=",
"1",
"projection",
"=",
"AllProjection",
"(",
")",
"username",
"=",
"UnicodeAttribute",
"(",
"hash_key",
"=",
"True",
")",
"class",
"EmailIndex",
"(",
"GlobalSecondaryIndex",
")",
":",
"class",
"Meta",
":",
"read_capacity_units",
"=",
"1",
"write_capacity_units",
"=",
"1",
"projection",
"=",
"AllProjection",
"(",
")",
"email",
"=",
"UnicodeAttribute",
"(",
"hash_key",
"=",
"True",
")",
"class",
"User",
"(",
"Model",
",",
"UserMixin",
")",
":",
"class",
"Meta",
":",
"table_name",
"=",
"'users'",
"id",
"=",
"UnicodeAttribute",
"(",
"hash_key",
"=",
"True",
",",
"default",
"=",
"lambda",
":",
"str",
"(",
"uuid",
".",
"uuid1",
"(",
")",
")",
")",
"active",
"=",
"BooleanAttribute",
"(",
")",
"# User authentication information",
"username",
"=",
"UnicodeAttribute",
"(",
"null",
"=",
"True",
")",
"password",
"=",
"UnicodeAttribute",
"(",
"null",
"=",
"True",
")",
"username_index",
"=",
"UsernameIndex",
"(",
")",
"# User email information",
"email",
"=",
"UnicodeAttribute",
"(",
"null",
"=",
"True",
")",
"email_confirmed_at",
"=",
"UTCDateTimeAttribute",
"(",
"null",
"=",
"True",
")",
"email_index",
"=",
"EmailIndex",
"(",
")",
"# User information",
"first_name",
"=",
"UnicodeAttribute",
"(",
"null",
"=",
"True",
")",
"last_name",
"=",
"UnicodeAttribute",
"(",
"null",
"=",
"True",
")",
"# Setup Flask-User",
"user_manager",
"=",
"UserManager",
"(",
"app",
",",
"db",
",",
"User",
")",
"# Create all database tables",
"print",
"(",
"'create_schema()'",
")",
"user_manager",
".",
"db_manager",
".",
"create_all_tables",
"(",
")",
"print",
"(",
"'created_schema()'",
")",
"# The Home page is accessible to anyone",
"@",
"app",
".",
"route",
"(",
"'/'",
")",
"def",
"home_page",
"(",
")",
":",
"return",
"render_template_string",
"(",
"\"\"\"\n {% block content %}\n <h2>Home page</h2>\n <p>This page can be accessed by anyone.</p><br/>\n <p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>\n <p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>\n {% endblock %}\n \"\"\"",
")",
"# The Members page is only accessible to authenticated users",
"@",
"app",
".",
"route",
"(",
"'/members'",
")",
"@",
"login_required",
"# Use of @login_required decorator",
"def",
"members_page",
"(",
")",
":",
"return",
"render_template_string",
"(",
"\"\"\"\n {% block content %}\n <h2>Members page</h2>\n <p>This page can only be accessed by authenticated users.</p><br/>\n <p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>\n <p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>\n {% endblock %}\n \"\"\"",
")",
"return",
"app"
] |
Flask application factory
|
[
"Flask",
"application",
"factory"
] |
python
|
train
|
klen/adrest
|
adrest/mixin/handler.py
|
https://github.com/klen/adrest/blob/8b75c67123cffabe5ed98c222bb7ab43c904d89c/adrest/mixin/handler.py#L130-L145
|
def handle_request(self, request, **resources):
    """ Get a method for request and execute.

    :return object: method result
    """
    if request.method not in self._meta.callmap:
        raise HttpError(
            'Unknown or unsupported method \'%s\'' % request.method,
            status=status.HTTP_501_NOT_IMPLEMENTED)

    # Get the appropriate create/read/update/delete function
    view = getattr(self, self._meta.callmap[request.method])

    # Get function data
    return view(request, **resources)
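
# Editor's addition: a hedged, standalone illustration of the callmap dispatch
# pattern used above. These names are illustrative, not adrest's actual API.
class EchoResource:
    class _Meta:
        callmap = {'GET': 'read', 'POST': 'create'}
    _meta = _Meta()

    def read(self, request, **resources):
        return ('read', resources)

    def create(self, request, **resources):
        return ('create', resources)

    def dispatch(self, method, **resources):
        # Same lookup as handle_request: HTTP method name -> bound view.
        view = getattr(self, self._meta.callmap[method])
        return view(None, **resources)

# EchoResource().dispatch('GET', pk=1) -> ('read', {'pk': 1})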
|
[
"def",
"handle_request",
"(",
"self",
",",
"request",
",",
"*",
"*",
"resources",
")",
":",
"if",
"not",
"request",
".",
"method",
"in",
"self",
".",
"_meta",
".",
"callmap",
".",
"keys",
"(",
")",
":",
"raise",
"HttpError",
"(",
"'Unknown or unsupported method \\'%s\\''",
"%",
"request",
".",
"method",
",",
"status",
"=",
"status",
".",
"HTTP_501_NOT_IMPLEMENTED",
")",
"# Get the appropriate create/read/update/delete function",
"view",
"=",
"getattr",
"(",
"self",
",",
"self",
".",
"_meta",
".",
"callmap",
"[",
"request",
".",
"method",
"]",
")",
"# Get function data",
"return",
"view",
"(",
"request",
",",
"*",
"*",
"resources",
")"
] |
Get a method for request and execute.
:return object: method result
|
[
"Get",
"a",
"method",
"for",
"request",
"and",
"execute",
"."
] |
python
|
train
|
watson-developer-cloud/python-sdk
|
ibm_watson/tone_analyzer_v3.py
|
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/tone_analyzer_v3.py#L114-L208
|
def tone(self,
         tone_input,
         sentences=None,
         tones=None,
         content_language=None,
         accept_language=None,
         content_type=None,
         **kwargs):
    """
    Analyze general tone.

    Use the general purpose endpoint to analyze the tone of your input content. The
    service analyzes the content for emotional and language tones. The method always
    analyzes the tone of the full document; by default, it also analyzes the tone of
    each individual sentence of the content.
    You can submit no more than 128 KB of total input content and no more than 1000
    individual sentences in JSON, plain text, or HTML format. The service analyzes the
    first 1000 sentences for document-level analysis and only the first 100 sentences
    for sentence-level analysis.
    Per the JSON specification, the default character encoding for JSON content is
    effectively always UTF-8; per the HTTP specification, the default encoding for
    plain text and HTML is ISO-8859-1 (effectively, the ASCII character set). When
    specifying a content type of plain text or HTML, include the `charset` parameter
    to indicate the character encoding of the input text; for example: `Content-Type:
    text/plain;charset=utf-8`. For `text/html`, the service removes HTML tags and
    analyzes only the textual content.

    **See also:** [Using the general-purpose
    endpoint](https://cloud.ibm.com/docs/services/tone-analyzer/using-tone.html#using-the-general-purpose-endpoint).

    :param ToneInput tone_input: JSON, plain text, or HTML input that contains the
    content to be analyzed. For JSON input, provide an object of type `ToneInput`.
    :param bool sentences: Indicates whether the service is to return an analysis of
    each individual sentence in addition to its analysis of the full document. If
    `true` (the default), the service returns results for each sentence.
    :param list[str] tones: **`2017-09-21`:** Deprecated. The service continues to
    accept the parameter for backward-compatibility, but the parameter no longer
    affects the response.
    **`2016-05-19`:** A comma-separated list of tones for which the service is to
    return its analysis of the input; the indicated tones apply both to the full
    document and to individual sentences of the document. You can specify one or more
    of the valid values. Omit the parameter to request results for all three tones.
    :param str content_language: The language of the input text for the request:
    English or French. Regional variants are treated as their parent language; for
    example, `en-US` is interpreted as `en`. The input content must match the
    specified language. Do not submit content that contains both languages. You can
    use different languages for **Content-Language** and **Accept-Language**.
    * **`2017-09-21`:** Accepts `en` or `fr`.
    * **`2016-05-19`:** Accepts only `en`.
    :param str accept_language: The desired language of the response. For
    two-character arguments, regional variants are treated as their parent language;
    for example, `en-US` is interpreted as `en`. You can use different languages for
    **Content-Language** and **Accept-Language**.
    :param str content_type: The type of the input. A character encoding can be
    specified by including a `charset` parameter. For example,
    'text/plain;charset=utf-8'.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    if tone_input is None:
        raise ValueError('tone_input must be provided')
    if isinstance(tone_input, ToneInput):
        tone_input = self._convert_model(tone_input, ToneInput)

    headers = {
        'Content-Language': content_language,
        'Accept-Language': accept_language,
        'Content-Type': content_type
    }
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    sdk_headers = get_sdk_headers('tone_analyzer', 'V3', 'tone')
    headers.update(sdk_headers)

    params = {
        'version': self.version,
        'sentences': sentences,
        'tones': self._convert_list(tones)
    }

    if content_type == 'application/json' and isinstance(tone_input, dict):
        data = json.dumps(tone_input)
    else:
        data = tone_input

    url = '/v3/tone'
    response = self.request(
        method='POST',
        url=url,
        headers=headers,
        params=params,
        data=data,
        accept_json=True)
    return response
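
# Editor's addition: a hedged usage sketch for the method above, following the
# ibm_watson v3.x constructor style current at this commit; the apikey and the
# input text are placeholders, not values from this dataset row.
# from ibm_watson import ToneAnalyzerV3
# service = ToneAnalyzerV3(version='2017-09-21', iam_apikey='<apikey>')
# response = service.tone({'text': 'I am very happy today!'},
#                         content_type='application/json')
# print(response.get_result())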
|
[
"def",
"tone",
"(",
"self",
",",
"tone_input",
",",
"sentences",
"=",
"None",
",",
"tones",
"=",
"None",
",",
"content_language",
"=",
"None",
",",
"accept_language",
"=",
"None",
",",
"content_type",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"tone_input",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'tone_input must be provided'",
")",
"if",
"isinstance",
"(",
"tone_input",
",",
"ToneInput",
")",
":",
"tone_input",
"=",
"self",
".",
"_convert_model",
"(",
"tone_input",
",",
"ToneInput",
")",
"headers",
"=",
"{",
"'Content-Language'",
":",
"content_language",
",",
"'Accept-Language'",
":",
"accept_language",
",",
"'Content-Type'",
":",
"content_type",
"}",
"if",
"'headers'",
"in",
"kwargs",
":",
"headers",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'headers'",
")",
")",
"sdk_headers",
"=",
"get_sdk_headers",
"(",
"'tone_analyzer'",
",",
"'V3'",
",",
"'tone'",
")",
"headers",
".",
"update",
"(",
"sdk_headers",
")",
"params",
"=",
"{",
"'version'",
":",
"self",
".",
"version",
",",
"'sentences'",
":",
"sentences",
",",
"'tones'",
":",
"self",
".",
"_convert_list",
"(",
"tones",
")",
"}",
"if",
"content_type",
"==",
"'application/json'",
"and",
"isinstance",
"(",
"tone_input",
",",
"dict",
")",
":",
"data",
"=",
"json",
".",
"dumps",
"(",
"tone_input",
")",
"else",
":",
"data",
"=",
"tone_input",
"url",
"=",
"'/v3/tone'",
"response",
"=",
"self",
".",
"request",
"(",
"method",
"=",
"'POST'",
",",
"url",
"=",
"url",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"params",
",",
"data",
"=",
"data",
",",
"accept_json",
"=",
"True",
")",
"return",
"response"
] |
Analyze general tone.
Use the general purpose endpoint to analyze the tone of your input content. The
service analyzes the content for emotional and language tones. The method always
analyzes the tone of the full document; by default, it also analyzes the tone of
each individual sentence of the content.
You can submit no more than 128 KB of total input content and no more than 1000
individual sentences in JSON, plain text, or HTML format. The service analyzes the
first 1000 sentences for document-level analysis and only the first 100 sentences
for sentence-level analysis.
Per the JSON specification, the default character encoding for JSON content is
effectively always UTF-8; per the HTTP specification, the default encoding for
plain text and HTML is ISO-8859-1 (effectively, the ASCII character set). When
specifying a content type of plain text or HTML, include the `charset` parameter
to indicate the character encoding of the input text; for example: `Content-Type:
text/plain;charset=utf-8`. For `text/html`, the service removes HTML tags and
analyzes only the textual content.
**See also:** [Using the general-purpose
endpoint](https://cloud.ibm.com/docs/services/tone-analyzer/using-tone.html#using-the-general-purpose-endpoint).
:param ToneInput tone_input: JSON, plain text, or HTML input that contains the
content to be analyzed. For JSON input, provide an object of type `ToneInput`.
:param bool sentences: Indicates whether the service is to return an analysis of
each individual sentence in addition to its analysis of the full document. If
`true` (the default), the service returns results for each sentence.
:param list[str] tones: **`2017-09-21`:** Deprecated. The service continues to
accept the parameter for backward-compatibility, but the parameter no longer
affects the response.
**`2016-05-19`:** A comma-separated list of tones for which the service is to
return its analysis of the input; the indicated tones apply both to the full
document and to individual sentences of the document. You can specify one or more
of the valid values. Omit the parameter to request results for all three tones.
:param str content_language: The language of the input text for the request:
English or French. Regional variants are treated as their parent language; for
example, `en-US` is interpreted as `en`. The input content must match the
specified language. Do not submit content that contains both languages. You can
use different languages for **Content-Language** and **Accept-Language**.
* **`2017-09-21`:** Accepts `en` or `fr`.
* **`2016-05-19`:** Accepts only `en`.
:param str accept_language: The desired language of the response. For
two-character arguments, regional variants are treated as their parent language;
for example, `en-US` is interpreted as `en`. You can use different languages for
**Content-Language** and **Accept-Language**.
:param str content_type: The type of the input. A character encoding can be
specified by including a `charset` parameter. For example,
'text/plain;charset=utf-8'.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
|
[
"Analyze",
"general",
"tone",
"."
] |
python
|
train
|
evolbioinfo/pastml
|
pastml/ml.py
|
https://github.com/evolbioinfo/pastml/blob/df8a375841525738383e59548eed3441b07dbd3e/pastml/ml.py#L496-L563
|
def choose_ancestral_states_mppa(tree, feature, states, force_joint=True):
    """
    Chooses node ancestral states based on their marginal probabilities using MPPA method.

    :param force_joint: make sure that Joint state is chosen even if it has a low probability.
    :type force_joint: bool
    :param tree: tree of interest
    :type tree: ete3.Tree
    :param feature: character for which the ancestral states are to be chosen
    :type feature: str
    :param states: possible character states in order corresponding to the probabilities array
    :type states: numpy.array
    :return: number of ancestral scenarios selected,
        calculated by multiplying the number of selected states for all nodes.
        Also modifies the get_personalized_feature_name(feature, ALLOWED_STATES) feature of each node
        to only contain the selected states.
    :rtype: int
    """
    lh_feature = get_personalized_feature_name(feature, LH)
    allowed_state_feature = get_personalized_feature_name(feature, ALLOWED_STATES)
    joint_state_feature = get_personalized_feature_name(feature, JOINT_STATE)

    n = len(states)
    _, state2array = get_state2allowed_states(states, False)

    num_scenarios = 1
    unresolved_nodes = 0
    num_states = 0

    # If force_joint == True, we make sure that the joint state is always chosen.
    # For this we sort the marginal probability array as
    # [lowest_non_joint_mp, ..., highest_non_joint_mp, joint_mp],
    # select k in 1:n such that the distance between choosing [0, 0, ..., 1/k, ..., 1/k]
    # and our sorted array is minimal, and return the corresponding states.
    for node in tree.traverse():
        marginal_likelihoods = getattr(node, lh_feature)
        marginal_probs = marginal_likelihoods / marginal_likelihoods.sum()
        if force_joint:
            joint_index = getattr(node, joint_state_feature)
            joint_prob = marginal_probs[joint_index]
            marginal_probs = np.hstack((np.sort(np.delete(marginal_probs, joint_index)), [joint_prob]))
        else:
            marginal_probs = np.sort(marginal_probs)
        best_k = n
        best_correction = np.inf
        for k in range(1, n + 1):
            correction = np.hstack((np.zeros(n - k), np.ones(k) / k)) - marginal_probs
            correction = correction.dot(correction)
            if correction < best_correction:
                best_correction = correction
                best_k = k
        num_scenarios *= best_k
        num_states += best_k
        if force_joint:
            # Keep the joint state first, then the highest marginal likelihoods.
            indices_selected = sorted(range(n),
                                      key=lambda _: (0 if _ == joint_index else 1, -marginal_likelihoods[_]))[:best_k]
        else:
            indices_selected = sorted(range(n), key=lambda _: -marginal_likelihoods[_])[:best_k]
        if best_k == 1:
            allowed_states = state2array[indices_selected[0]]
        else:
            allowed_states = np.zeros(len(states), dtype=int)
            allowed_states[indices_selected] = 1
            unresolved_nodes += 1
        node.add_feature(allowed_state_feature, allowed_states)

    return num_scenarios, unresolved_nodes, num_states
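
# Editor's addition: a hedged illustration of the k-selection step above on a
# toy sorted probability vector, independent of pastml's tree data structures.
import numpy as np

probs = np.sort(np.array([0.05, 0.15, 0.8]))
n = len(probs)
best_k, best_dist = n, np.inf
for k in range(1, n + 1):
    # Distance between the ideal k-state vector [0, ..., 1/k, ..., 1/k] and probs.
    diff = np.hstack((np.zeros(n - k), np.ones(k) / k)) - probs
    dist = diff.dot(diff)
    if dist < best_dist:
        best_dist, best_k = dist, k
print(best_k)  # 1: only the state with probability 0.8 is kept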
|
[
"def",
"choose_ancestral_states_mppa",
"(",
"tree",
",",
"feature",
",",
"states",
",",
"force_joint",
"=",
"True",
")",
":",
"lh_feature",
"=",
"get_personalized_feature_name",
"(",
"feature",
",",
"LH",
")",
"allowed_state_feature",
"=",
"get_personalized_feature_name",
"(",
"feature",
",",
"ALLOWED_STATES",
")",
"joint_state_feature",
"=",
"get_personalized_feature_name",
"(",
"feature",
",",
"JOINT_STATE",
")",
"n",
"=",
"len",
"(",
"states",
")",
"_",
",",
"state2array",
"=",
"get_state2allowed_states",
"(",
"states",
",",
"False",
")",
"num_scenarios",
"=",
"1",
"unresolved_nodes",
"=",
"0",
"num_states",
"=",
"0",
"# If force_joint == True,",
"# we make sure that the joint state is always chosen,",
"# for this we sort the marginal probabilities array as [lowest_non_joint_mp, ..., highest_non_joint_mp, joint_mp]",
"# select k in 1:n such as the correction between choosing 0, 0, ..., 1/k, ..., 1/k and our sorted array is min",
"# and return the corresponding states",
"for",
"node",
"in",
"tree",
".",
"traverse",
"(",
")",
":",
"marginal_likelihoods",
"=",
"getattr",
"(",
"node",
",",
"lh_feature",
")",
"marginal_probs",
"=",
"marginal_likelihoods",
"/",
"marginal_likelihoods",
".",
"sum",
"(",
")",
"if",
"force_joint",
":",
"joint_index",
"=",
"getattr",
"(",
"node",
",",
"joint_state_feature",
")",
"joint_prob",
"=",
"marginal_probs",
"[",
"joint_index",
"]",
"marginal_probs",
"=",
"np",
".",
"hstack",
"(",
"(",
"np",
".",
"sort",
"(",
"np",
".",
"delete",
"(",
"marginal_probs",
",",
"joint_index",
")",
")",
",",
"[",
"joint_prob",
"]",
")",
")",
"else",
":",
"marginal_probs",
"=",
"np",
".",
"sort",
"(",
"marginal_probs",
")",
"best_k",
"=",
"n",
"best_correstion",
"=",
"np",
".",
"inf",
"for",
"k",
"in",
"range",
"(",
"1",
",",
"n",
"+",
"1",
")",
":",
"correction",
"=",
"np",
".",
"hstack",
"(",
"(",
"np",
".",
"zeros",
"(",
"n",
"-",
"k",
")",
",",
"np",
".",
"ones",
"(",
"k",
")",
"/",
"k",
")",
")",
"-",
"marginal_probs",
"correction",
"=",
"correction",
".",
"dot",
"(",
"correction",
")",
"if",
"correction",
"<",
"best_correstion",
":",
"best_correstion",
"=",
"correction",
"best_k",
"=",
"k",
"num_scenarios",
"*=",
"best_k",
"num_states",
"+=",
"best_k",
"if",
"force_joint",
":",
"indices_selected",
"=",
"sorted",
"(",
"range",
"(",
"n",
")",
",",
"key",
"=",
"lambda",
"_",
":",
"(",
"0",
"if",
"n",
"==",
"joint_index",
"else",
"1",
",",
"-",
"marginal_likelihoods",
"[",
"_",
"]",
")",
")",
"[",
":",
"best_k",
"]",
"else",
":",
"indices_selected",
"=",
"sorted",
"(",
"range",
"(",
"n",
")",
",",
"key",
"=",
"lambda",
"_",
":",
"-",
"marginal_likelihoods",
"[",
"_",
"]",
")",
"[",
":",
"best_k",
"]",
"if",
"best_k",
"==",
"1",
":",
"allowed_states",
"=",
"state2array",
"[",
"indices_selected",
"[",
"0",
"]",
"]",
"else",
":",
"allowed_states",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"states",
")",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"allowed_states",
"[",
"indices_selected",
"]",
"=",
"1",
"unresolved_nodes",
"+=",
"1",
"node",
".",
"add_feature",
"(",
"allowed_state_feature",
",",
"allowed_states",
")",
"return",
"num_scenarios",
",",
"unresolved_nodes",
",",
"num_states"
] |
Chooses node ancestral states based on their marginal probabilities using MPPA method.
:param force_joint: make sure that Joint state is chosen even if it has a low probability.
:type force_joint: bool
:param tree: tree of interest
:type tree: ete3.Tree
:param feature: character for which the ancestral states are to be chosen
:type feature: str
:param states: possible character states in order corresponding to the probabilities array
:type states: numpy.array
:return: number of ancestral scenarios selected,
calculated by multiplying the number of selected states for all nodes.
Also modifies the get_personalized_feature_name(feature, ALLOWED_STATES) feature of each node
to only contain the selected states.
:rtype: int
|
[
"Chooses",
"node",
"ancestral",
"states",
"based",
"on",
"their",
"marginal",
"probabilities",
"using",
"MPPA",
"method",
"."
] |
python
|
train
|
Microsoft/azure-devops-python-api
|
azure-devops/azure/devops/v5_0/licensing/licensing_client.py
|
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/licensing/licensing_client.py#L88-L107
|
def assign_available_account_entitlement(self, user_id, dont_notify_user=None, origin=None):
    """AssignAvailableAccountEntitlement.

    [Preview API] Assign an available entitlement to a user.
    :param str user_id: The user to which to assign the entitlement
    :param bool dont_notify_user:
    :param str origin:
    :rtype: :class:`<AccountEntitlement> <azure.devops.v5_0.licensing.models.AccountEntitlement>`
    """
    query_parameters = {}
    if user_id is not None:
        query_parameters['userId'] = self._serialize.query('user_id', user_id, 'str')
    if dont_notify_user is not None:
        query_parameters['dontNotifyUser'] = self._serialize.query('dont_notify_user', dont_notify_user, 'bool')
    if origin is not None:
        query_parameters['origin'] = self._serialize.query('origin', origin, 'str')
    response = self._send(http_method='POST',
                          location_id='c01e9fd5-0d8c-4d5e-9a68-734bd8da6a38',
                          version='5.0-preview.1',
                          query_parameters=query_parameters)
    return self._deserialize('AccountEntitlement', response)
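
# Editor's addition: a hedged usage sketch. The Connection/BasicAuthentication
# setup is azure-devops' documented pattern, but the client-accessor name and
# every placeholder value below are assumptions, not taken from this row.
# from azure.devops.connection import Connection
# from msrest.authentication import BasicAuthentication
# connection = Connection(base_url='https://dev.azure.com/<org>',
#                         creds=BasicAuthentication('', '<personal_access_token>'))
# licensing_client = connection.clients.get_licensing_client()  # accessor name assumed
# entitlement = licensing_client.assign_available_account_entitlement('<user_id>')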
|
[
"def",
"assign_available_account_entitlement",
"(",
"self",
",",
"user_id",
",",
"dont_notify_user",
"=",
"None",
",",
"origin",
"=",
"None",
")",
":",
"query_parameters",
"=",
"{",
"}",
"if",
"user_id",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'userId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'user_id'",
",",
"user_id",
",",
"'str'",
")",
"if",
"dont_notify_user",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'dontNotifyUser'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'dont_notify_user'",
",",
"dont_notify_user",
",",
"'bool'",
")",
"if",
"origin",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'origin'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'origin'",
",",
"origin",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'POST'",
",",
"location_id",
"=",
"'c01e9fd5-0d8c-4d5e-9a68-734bd8da6a38'",
",",
"version",
"=",
"'5.0-preview.1'",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'AccountEntitlement'",
",",
"response",
")"
] |
AssignAvailableAccountEntitlement.
[Preview API] Assign an available entitlement to a user
:param str user_id: The user to which to assign the entitlement
:param bool dont_notify_user:
:param str origin:
:rtype: :class:`<AccountEntitlement> <azure.devops.v5_0.licensing.models.AccountEntitlement>`
|
[
"AssignAvailableAccountEntitlement",
".",
"[",
"Preview",
"API",
"]",
"Assign",
"an",
"available",
"entitilement",
"to",
"a",
"user",
":",
"param",
"str",
"user_id",
":",
"The",
"user",
"to",
"which",
"to",
"assign",
"the",
"entitilement",
":",
"param",
"bool",
"dont_notify_user",
":",
":",
"param",
"str",
"origin",
":",
":",
"rtype",
":",
":",
"class",
":",
"<AccountEntitlement",
">",
"<azure",
".",
"devops",
".",
"v5_0",
".",
"licensing",
".",
"models",
".",
"AccountEntitlement",
">"
] |
python
|
train
|
saltstack/salt
|
salt/utils/botomod.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/botomod.py#L256-L273
|
def assign_funcs(modname, service, module=None, pack=None):
    '''
    Assign _get_conn and _cache_id functions to the named module.

    .. code-block:: python

        __utils__['boto.assign_partials'](__name__, 'ec2')
    '''
    if pack:
        global __salt__  # pylint: disable=W0601
        __salt__ = pack
    mod = sys.modules[modname]
    setattr(mod, '_get_conn', get_connection_func(service, module=module))
    setattr(mod, '_cache_id', cache_id_func(service))
    # TODO: Remove this and import salt.utils.data.exactly_one into boto_* modules instead
    # Leaving this way for now so boto modules can be back ported
    setattr(mod, '_exactly_one', exactly_one)
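
# Editor's addition: a hedged, standalone illustration of the module-injection
# pattern used above. The helper names here are illustrative, not salt's API.
import sys

def make_conn_getter(service):
    def getter():
        return 'conn:' + service
    return getter

def inject(modname, service):
    # Setting an attribute on the module object creates a module-level name.
    setattr(sys.modules[modname], '_get_conn', make_conn_getter(service))

inject(__name__, 'ec2')
print(_get_conn())  # conn:ec2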
|
[
"def",
"assign_funcs",
"(",
"modname",
",",
"service",
",",
"module",
"=",
"None",
",",
"pack",
"=",
"None",
")",
":",
"if",
"pack",
":",
"global",
"__salt__",
"# pylint: disable=W0601",
"__salt__",
"=",
"pack",
"mod",
"=",
"sys",
".",
"modules",
"[",
"modname",
"]",
"setattr",
"(",
"mod",
",",
"'_get_conn'",
",",
"get_connection_func",
"(",
"service",
",",
"module",
"=",
"module",
")",
")",
"setattr",
"(",
"mod",
",",
"'_cache_id'",
",",
"cache_id_func",
"(",
"service",
")",
")",
"# TODO: Remove this and import salt.utils.data.exactly_one into boto_* modules instead",
"# Leaving this way for now so boto modules can be back ported",
"setattr",
"(",
"mod",
",",
"'_exactly_one'",
",",
"exactly_one",
")"
] |
Assign _get_conn and _cache_id functions to the named module.
.. code-block:: python
__utils__['boto.assign_partials'](__name__, 'ec2')
|
[
"Assign",
"_get_conn",
"and",
"_cache_id",
"functions",
"to",
"the",
"named",
"module",
"."
] |
python
|
train
|
Azure/azure-cli-extensions
|
src/alias/azext_alias/argument.py
|
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/alias/azext_alias/argument.py#L66-L86
|
def normalize_placeholders(arg, inject_quotes=False):
    """
    Normalize placeholders' names so that the template can be ingested into the Jinja template engine.
    - Jinja does not accept numbers as placeholder names, so add a "_"
      before the numbers to make them valid placeholder names.
    - Surround placeholder expressions with "" so we can preserve spaces inside the positional arguments.

    Args:
        arg: The string to process.
        inject_quotes: True if we want to surround placeholders with a pair of quotes.

    Returns:
        A processed string where placeholders are surrounded by "" and
        numbered placeholders are prepended with "_".
    """
    number_placeholders = re.findall(r'{{\s*\d+\s*}}', arg)
    for number_placeholder in number_placeholders:
        number = re.search(r'\d+', number_placeholder).group()
        arg = arg.replace(number_placeholder, '{{_' + number + '}}')

    return arg.replace('{{', '"{{').replace('}}', '}}"') if inject_quotes else arg
|
[
"def",
"normalize_placeholders",
"(",
"arg",
",",
"inject_quotes",
"=",
"False",
")",
":",
"number_placeholders",
"=",
"re",
".",
"findall",
"(",
"r'{{\\s*\\d+\\s*}}'",
",",
"arg",
")",
"for",
"number_placeholder",
"in",
"number_placeholders",
":",
"number",
"=",
"re",
".",
"search",
"(",
"r'\\d+'",
",",
"number_placeholder",
")",
".",
"group",
"(",
")",
"arg",
"=",
"arg",
".",
"replace",
"(",
"number_placeholder",
",",
"'{{_'",
"+",
"number",
"+",
"'}}'",
")",
"return",
"arg",
".",
"replace",
"(",
"'{{'",
",",
"'\"{{'",
")",
".",
"replace",
"(",
"'}}'",
",",
"'}}\"'",
")",
"if",
"inject_quotes",
"else",
"arg"
] |
Normalize placeholders' names so that the template can be ingested into the Jinja template engine.
- Jinja does not accept numbers as placeholder names, so add a "_"
before the numbers to make them valid placeholder names.
- Surround placeholder expressions with "" so we can preserve spaces inside the positional arguments.
Args:
arg: The string to process.
inject_quotes: True if we want to surround placeholders with a pair of quotes.
Returns:
A processed string where placeholders are surrounded by "" and
numbered placeholders are prepended with "_".
|
[
"Normalize",
"placeholders",
"names",
"so",
"that",
"the",
"template",
"can",
"be",
"ingested",
"into",
"Jinja",
"template",
"engine",
".",
"-",
"Jinja",
"does",
"not",
"accept",
"numbers",
"as",
"placeholder",
"names",
"so",
"add",
"a",
"_",
"before",
"the",
"numbers",
"to",
"make",
"them",
"valid",
"placeholder",
"names",
".",
"-",
"Surround",
"placeholders",
"expressions",
"with",
"so",
"we",
"can",
"preserve",
"spaces",
"inside",
"the",
"positional",
"arguments",
"."
] |
python
|
train
|
pytroll/pyspectral
|
rsr_convert_scripts/msi_reader.py
|
https://github.com/pytroll/pyspectral/blob/fd296c0e0bdf5364fa180134a1292665d6bc50a3/rsr_convert_scripts/msi_reader.py#L95-L127
|
def _load(self, scale=0.001):
    """Load the Sentinel-2 MSI relative spectral responses
    """
    with open_workbook(self.path) as wb_:
        for sheet in wb_.sheets():
            if sheet.name not in SHEET_HEADERS.keys():
                continue
            plt_short_name = PLATFORM_SHORT_NAME.get(self.platform_name)
            if plt_short_name != SHEET_HEADERS.get(sheet.name):
                continue
            wvl = sheet.col_values(0, 1)
            for idx in range(1, sheet.row_len(0)):
                ch_name = MSI_BAND_NAMES[plt_short_name].get(str(sheet.col_values(idx, 0, 1)[0]))
                if ch_name != self.bandname:
                    continue
                resp = sheet.col_values(idx, 1)
                resp = np.array(resp)
                resp = np.where(resp == '', 0, resp).astype('float32')
                mask = np.less_equal(resp, 0.00001)
                wvl0 = np.ma.masked_array(wvl, mask=mask)
                wvl_mask = np.ma.masked_outside(wvl, wvl0.min() - 2, wvl0.max() + 2)
                wvl = wvl_mask.compressed()
                resp = np.ma.masked_array(resp, mask=wvl_mask.mask).compressed()
                self.rsr = {'wavelength': wvl / 1000., 'response': resp}
                break
            break
|
[
"def",
"_load",
"(",
"self",
",",
"scale",
"=",
"0.001",
")",
":",
"with",
"open_workbook",
"(",
"self",
".",
"path",
")",
"as",
"wb_",
":",
"for",
"sheet",
"in",
"wb_",
".",
"sheets",
"(",
")",
":",
"if",
"sheet",
".",
"name",
"not",
"in",
"SHEET_HEADERS",
".",
"keys",
"(",
")",
":",
"continue",
"plt_short_name",
"=",
"PLATFORM_SHORT_NAME",
".",
"get",
"(",
"self",
".",
"platform_name",
")",
"if",
"plt_short_name",
"!=",
"SHEET_HEADERS",
".",
"get",
"(",
"sheet",
".",
"name",
")",
":",
"continue",
"wvl",
"=",
"sheet",
".",
"col_values",
"(",
"0",
",",
"1",
")",
"for",
"idx",
"in",
"range",
"(",
"1",
",",
"sheet",
".",
"row_len",
"(",
"0",
")",
")",
":",
"ch_name",
"=",
"MSI_BAND_NAMES",
"[",
"plt_short_name",
"]",
".",
"get",
"(",
"str",
"(",
"sheet",
".",
"col_values",
"(",
"idx",
",",
"0",
",",
"1",
")",
"[",
"0",
"]",
")",
")",
"if",
"ch_name",
"!=",
"self",
".",
"bandname",
":",
"continue",
"resp",
"=",
"sheet",
".",
"col_values",
"(",
"idx",
",",
"1",
")",
"resp",
"=",
"np",
".",
"array",
"(",
"resp",
")",
"resp",
"=",
"np",
".",
"where",
"(",
"resp",
"==",
"''",
",",
"0",
",",
"resp",
")",
".",
"astype",
"(",
"'float32'",
")",
"mask",
"=",
"np",
".",
"less_equal",
"(",
"resp",
",",
"0.00001",
")",
"wvl0",
"=",
"np",
".",
"ma",
".",
"masked_array",
"(",
"wvl",
",",
"mask",
"=",
"mask",
")",
"wvl_mask",
"=",
"np",
".",
"ma",
".",
"masked_outside",
"(",
"wvl",
",",
"wvl0",
".",
"min",
"(",
")",
"-",
"2",
",",
"wvl0",
".",
"max",
"(",
")",
"+",
"2",
")",
"wvl",
"=",
"wvl_mask",
".",
"compressed",
"(",
")",
"resp",
"=",
"np",
".",
"ma",
".",
"masked_array",
"(",
"resp",
",",
"mask",
"=",
"wvl_mask",
".",
"mask",
")",
".",
"compressed",
"(",
")",
"self",
".",
"rsr",
"=",
"{",
"'wavelength'",
":",
"wvl",
"/",
"1000.",
",",
"'response'",
":",
"resp",
"}",
"break",
"break"
] |
Load the Sentinel-2 MSI relative spectral responses
|
[
"Load",
"the",
"Sentinel",
"-",
"2",
"MSI",
"relative",
"spectral",
"responses"
] |
python
|
train
|
StackStorm/pybind
|
pybind/nos/v6_0_2f/brocade_interface_ext_rpc/get_media_detail/output/interface/qsfpp/__init__.py
|
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_interface_ext_rpc/get_media_detail/output/interface/qsfpp/__init__.py#L403-L424
|
def _set_media_form_factor(self, v, load=False):
    """
    Setter method for media_form_factor, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfpp/media_form_factor (enumeration)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_media_form_factor is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_media_form_factor() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'tv': {'value': 5}, u'tw': {'value': 8}, u'mi': {'value': 6}, u'tp': {'value': 7}, u'm5': {'value': 3}, u'm6': {'value': 4}, u'sm': {'value': 1}, u'unknown': {'value': 9}, u'mx': {'value': 2}},), is_leaf=True, yang_name="media-form-factor", rest_name="media-form-factor", parent=self, choice=(u'interface-identifier', u'qsfpp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """media_form_factor must be of a type compatible with enumeration""",
            'defined-type': "brocade-interface-ext:enumeration",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'tv': {'value': 5}, u'tw': {'value': 8}, u'mi': {'value': 6}, u'tp': {'value': 7}, u'm5': {'value': 3}, u'm6': {'value': 4}, u'sm': {'value': 1}, u'unknown': {'value': 9}, u'mx': {'value': 2}},), is_leaf=True, yang_name="media-form-factor", rest_name="media-form-factor", parent=self, choice=(u'interface-identifier', u'qsfpp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)""",
        })
    self.__media_form_factor = t
    if hasattr(self, '_set'):
        self._set()
|
[
"def",
"_set_media_form_factor",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"unicode",
",",
"restriction_type",
"=",
"\"dict_key\"",
",",
"restriction_arg",
"=",
"{",
"u'tv'",
":",
"{",
"'value'",
":",
"5",
"}",
",",
"u'tw'",
":",
"{",
"'value'",
":",
"8",
"}",
",",
"u'mi'",
":",
"{",
"'value'",
":",
"6",
"}",
",",
"u'tp'",
":",
"{",
"'value'",
":",
"7",
"}",
",",
"u'm5'",
":",
"{",
"'value'",
":",
"3",
"}",
",",
"u'm6'",
":",
"{",
"'value'",
":",
"4",
"}",
",",
"u'sm'",
":",
"{",
"'value'",
":",
"1",
"}",
",",
"u'unknown'",
":",
"{",
"'value'",
":",
"9",
"}",
",",
"u'mx'",
":",
"{",
"'value'",
":",
"2",
"}",
"}",
",",
")",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"media-form-factor\"",
",",
"rest_name",
"=",
"\"media-form-factor\"",
",",
"parent",
"=",
"self",
",",
"choice",
"=",
"(",
"u'interface-identifier'",
",",
"u'qsfpp'",
")",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"False",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-interface-ext'",
",",
"defining_module",
"=",
"'brocade-interface-ext'",
",",
"yang_type",
"=",
"'enumeration'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"media_form_factor must be of a type compatible with enumeration\"\"\"",
",",
"'defined-type'",
":",
"\"brocade-interface-ext:enumeration\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'tv': {'value': 5}, u'tw': {'value': 8}, u'mi': {'value': 6}, u'tp': {'value': 7}, u'm5': {'value': 3}, u'm6': {'value': 4}, u'sm': {'value': 1}, u'unknown': {'value': 9}, u'mx': {'value': 2}},), is_leaf=True, yang_name=\"media-form-factor\", rest_name=\"media-form-factor\", parent=self, choice=(u'interface-identifier', u'qsfpp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='enumeration', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__media_form_factor",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] |
Setter method for media_form_factor, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfpp/media_form_factor (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_media_form_factor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_media_form_factor() directly.
|
[
"Setter",
"method",
"for",
"media_form_factor",
"mapped",
"from",
"YANG",
"variable",
"/",
"brocade_interface_ext_rpc",
"/",
"get_media_detail",
"/",
"output",
"/",
"interface",
"/",
"qsfpp",
"/",
"media_form_factor",
"(",
"enumeration",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_media_form_factor",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_media_form_factor",
"()",
"directly",
"."
] |
python
|
train
|
MasterOdin/pylint_runner
|
pylint_runner/main.py
|
https://github.com/MasterOdin/pylint_runner/blob/b8ec3324e568e172d38fc0b6fa6f5551b229de07/pylint_runner/main.py#L114-L143
|
def get_files_from_dir(self, current_dir):
    """
    Recursively walk through a directory and get all python files and then walk
    through any potential directories that are found off current directory,
    so long as not within self.IGNORE_FOLDERS

    :return: all python files that were found off current_dir
    """
    if current_dir[-1] != "/" and current_dir != ".":
        current_dir += "/"
    files = []
    for dir_file in os.listdir(current_dir):
        if current_dir != ".":
            file_path = current_dir + dir_file
        else:
            file_path = dir_file
        if os.path.isfile(file_path):
            file_split = os.path.splitext(dir_file)
            if len(file_split) == 2 and file_split[0] != "" \
                    and file_split[1] == ".py":
                files.append(file_path)
        elif (os.path.isdir(dir_file) or os.path.isdir(file_path)) \
                and dir_file not in self.ignore_folders:
            path = dir_file + os.path.sep
            if current_dir not in ["", "."]:
                path = os.path.join(current_dir.rstrip(os.path.sep), path)
            files += self.get_files_from_dir(path)
    return files
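
# Editor's addition: a hedged illustration of the same traversal expressed with
# os.walk, pruning ignored folders in place. ignore_folders is assumed to be a
# collection of directory names, as in the method above.
import os

def find_python_files(root, ignore_folders):
    found = []
    for dirpath, dirnames, filenames in os.walk(root):
        # Editing dirnames in place stops os.walk from descending into them.
        dirnames[:] = [d for d in dirnames if d not in ignore_folders]
        found += [os.path.join(dirpath, f) for f in filenames if f.endswith('.py')]
    return found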
|
[
"def",
"get_files_from_dir",
"(",
"self",
",",
"current_dir",
")",
":",
"if",
"current_dir",
"[",
"-",
"1",
"]",
"!=",
"\"/\"",
"and",
"current_dir",
"!=",
"\".\"",
":",
"current_dir",
"+=",
"\"/\"",
"files",
"=",
"[",
"]",
"for",
"dir_file",
"in",
"os",
".",
"listdir",
"(",
"current_dir",
")",
":",
"if",
"current_dir",
"!=",
"\".\"",
":",
"file_path",
"=",
"current_dir",
"+",
"dir_file",
"else",
":",
"file_path",
"=",
"dir_file",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"file_path",
")",
":",
"file_split",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"dir_file",
")",
"if",
"len",
"(",
"file_split",
")",
"==",
"2",
"and",
"file_split",
"[",
"0",
"]",
"!=",
"\"\"",
"and",
"file_split",
"[",
"1",
"]",
"==",
"\".py\"",
":",
"files",
".",
"append",
"(",
"file_path",
")",
"elif",
"(",
"os",
".",
"path",
".",
"isdir",
"(",
"dir_file",
")",
"or",
"os",
".",
"path",
".",
"isdir",
"(",
"file_path",
")",
")",
"and",
"dir_file",
"not",
"in",
"self",
".",
"ignore_folders",
":",
"path",
"=",
"dir_file",
"+",
"os",
".",
"path",
".",
"sep",
"if",
"current_dir",
"not",
"in",
"[",
"\"\"",
",",
"\".\"",
"]",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"current_dir",
".",
"rstrip",
"(",
"os",
".",
"path",
".",
"sep",
")",
",",
"path",
")",
"files",
"+=",
"self",
".",
"get_files_from_dir",
"(",
"path",
")",
"return",
"files"
] |
Recursively walk through a directory and get all python files and then walk
through any potential directories that are found off current directory,
so long as not within self.IGNORE_FOLDERS
:return: all python files that were found off current_dir
|
[
"Recursively",
"walk",
"through",
"a",
"directory",
"and",
"get",
"all",
"python",
"files",
"and",
"then",
"walk",
"through",
"any",
"potential",
"directories",
"that",
"are",
"found",
"off",
"current",
"directory",
"so",
"long",
"as",
"not",
"within",
"self",
".",
"IGNORE_FOLDERS",
":",
"return",
":",
"all",
"python",
"files",
"that",
"were",
"found",
"off",
"current_dir"
] |
python
|
train
|
marti1125/culqipy1_2
|
culqipy1_2/utils.py
|
https://github.com/marti1125/culqipy1_2/blob/e48ed496819009a642211f048631a5e3d4b1a16c/culqipy1_2/utils.py#L41-L73
|
def get_result(self):
    """
    Returns an HTTP response object.
    """
    timeout = 60
    if self.method == "GET":
        timeout = 360
    headers = {
        "Authorization": "Bearer " + self.key,
        "content-type": "application/json"
    }
    response = None
    try:
        response = getattr(requests, self.method.lower())(
            self.url,
            headers=headers,
            params=self.data,
            data=self.data,
            timeout=timeout,
        )
        # Return the response.
        return response
    except requests.exceptions.RequestException:
        error = {
            "objeto": "error",
            "tipo": "error_api",
            "codigo_error": "404",
            "mensaje": "conexion...",
            "mensaje_usuario": "¡Error de conexion!",
        }
        return error
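
# Editor's addition: a hedged, standalone illustration of the getattr-based
# HTTP-method dispatch used above; httpbin.org is a placeholder endpoint.
import requests

method = 'get'
resp = getattr(requests, method)('https://httpbin.org/get', timeout=10)
print(resp.status_code)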
|
[
"def",
"get_result",
"(",
"self",
")",
":",
"timeout",
"=",
"60",
"if",
"self",
".",
"method",
"==",
"\"GET\"",
":",
"timeout",
"=",
"360",
"headers",
"=",
"{",
"\"Authorization\"",
":",
"\"Bearer \"",
"+",
"self",
".",
"key",
",",
"\"content-type\"",
":",
"\"application/json\"",
"}",
"response",
"=",
"None",
"try",
":",
"response",
"=",
"getattr",
"(",
"requests",
",",
"self",
".",
"method",
".",
"lower",
"(",
")",
")",
"(",
"self",
".",
"url",
",",
"headers",
"=",
"headers",
",",
"params",
"=",
"self",
".",
"data",
",",
"data",
"=",
"self",
".",
"data",
",",
"timeout",
"=",
"timeout",
",",
")",
"# Return the response.",
"return",
"response",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
":",
"error",
"=",
"{",
"\"objeto\"",
":",
"\"error\"",
",",
"\"tipo\"",
":",
"\"error_api\"",
",",
"\"codigo_error\"",
":",
"\"404\"",
",",
"\"mensaje\"",
":",
"\"conexion...\"",
",",
"\"mensaje_usuario\"",
":",
"\"¡Error de conexion!\",",
"",
"}",
"return",
"error"
] |
Returns an HTTP response object.
|
[
"Returns",
"an",
"http",
"response",
"object",
"."
] |
python
|
train
|
hydpy-dev/hydpy
|
hydpy/auxs/statstools.py
|
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/statstools.py#L241-L257
|
def corr(sim=None, obs=None, node=None, skip_nan=False):
    """Calculate the product-moment correlation coefficient after Pearson.

    >>> from hydpy import round_
    >>> from hydpy import corr
    >>> round_(corr(sim=[0.5, 1.0, 1.5], obs=[1.0, 2.0, 3.0]))
    1.0
    >>> round_(corr(sim=[4.0, 2.0, 0.0], obs=[1.0, 2.0, 3.0]))
    -1.0
    >>> round_(corr(sim=[1.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0]))
    0.0

    See the documentation on function |prepare_arrays| for some
    additional instructions for use of function |corr|.
    """
    sim, obs = prepare_arrays(sim, obs, node, skip_nan)
    return numpy.corrcoef(sim, obs)[0, 1]
|
[
"def",
"corr",
"(",
"sim",
"=",
"None",
",",
"obs",
"=",
"None",
",",
"node",
"=",
"None",
",",
"skip_nan",
"=",
"False",
")",
":",
"sim",
",",
"obs",
"=",
"prepare_arrays",
"(",
"sim",
",",
"obs",
",",
"node",
",",
"skip_nan",
")",
"return",
"numpy",
".",
"corrcoef",
"(",
"sim",
",",
"obs",
")",
"[",
"0",
",",
"1",
"]"
] |
Calculate the product-moment correlation coefficient after Pearson.
>>> from hydpy import round_
>>> from hydpy import corr
>>> round_(corr(sim=[0.5, 1.0, 1.5], obs=[1.0, 2.0, 3.0]))
1.0
>>> round_(corr(sim=[4.0, 2.0, 0.0], obs=[1.0, 2.0, 3.0]))
-1.0
>>> round_(corr(sim=[1.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0]))
0.0
See the documentation on function |prepare_arrays| for some
additional instructions for use of function |corr|.
|
[
"Calculate",
"the",
"product",
"-",
"moment",
"correlation",
"coefficient",
"after",
"Pearson",
"."
] |
python
|
train
|
matthew-brett/delocate
|
delocate/tools.py
|
https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/tools.py#L197-L222
|
def get_install_names(filename):
    """ Return install names from library named in `filename`

    Returns tuple of install names

    tuple will be empty if no install names, or if this is not an object file.

    Parameters
    ----------
    filename : str
        filename of library

    Returns
    -------
    install_names : tuple
        tuple of install names for library `filename`
    """
    lines = _cmd_out_err(['otool', '-L', filename])
    if not _line0_says_object(lines[0], filename):
        return ()
    names = tuple(parse_install_name(line)[0] for line in lines[1:])
    install_id = get_install_id(filename)
    if install_id is not None:
        # The otool listing starts with the library's own install id when it
        # has one; skip it so that only the dependencies remain.
        assert names[0] == install_id
        return names[1:]
    return names
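
# Editor's addition: a hedged usage sketch; requires macOS with otool on PATH,
# and the dylib path below is a placeholder.
# for name in get_install_names('/usr/lib/libz.dylib'):
#     print(name)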
|
[
"def",
"get_install_names",
"(",
"filename",
")",
":",
"lines",
"=",
"_cmd_out_err",
"(",
"[",
"'otool'",
",",
"'-L'",
",",
"filename",
"]",
")",
"if",
"not",
"_line0_says_object",
"(",
"lines",
"[",
"0",
"]",
",",
"filename",
")",
":",
"return",
"(",
")",
"names",
"=",
"tuple",
"(",
"parse_install_name",
"(",
"line",
")",
"[",
"0",
"]",
"for",
"line",
"in",
"lines",
"[",
"1",
":",
"]",
")",
"install_id",
"=",
"get_install_id",
"(",
"filename",
")",
"if",
"not",
"install_id",
"is",
"None",
":",
"assert",
"names",
"[",
"0",
"]",
"==",
"install_id",
"return",
"names",
"[",
"1",
":",
"]",
"return",
"names"
] |
Return install names from library named in `filename`
Returns tuple of install names
tuple will be empty if no install names, or if this is not an object file.
Parameters
----------
filename : str
filename of library
Returns
-------
install_names : tuple
tuple of install names for library `filename`
|
[
"Return",
"install",
"names",
"from",
"library",
"named",
"in",
"filename"
] |
python
|
train
|
google/pyringe
|
pyringe/payload/libpython.py
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/payload/libpython.py#L273-L337
|
def subclass_from_type(cls, t):
    '''
    Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
    (PyTypeObject*), determine the corresponding subclass of PyObjectPtr
    to use

    Ideally, we would look up the symbols for the global types, but that
    isn't working yet:
      (gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
      Traceback (most recent call last):
        File "<string>", line 1, in <module>
      NotImplementedError: Symbol type not yet supported in Python scripts.
      Error while executing Python code.

    For now, we use tp_flags, after doing some string comparisons on the
    tp_name for some special-cases that don't seem to be visible through
    flags
    '''
    try:
        tp_name = t.field('tp_name').string()
        tp_flags = int(t.field('tp_flags'))
    except RuntimeError:
        # Handle any kind of error e.g. NULL ptrs by simply using the base
        # class
        return cls

    #print 'tp_flags = 0x%08x' % tp_flags
    #print 'tp_name = %r' % tp_name

    name_map = {'bool': PyBoolObjectPtr,
                'classobj': PyClassObjectPtr,
                'instance': PyInstanceObjectPtr,
                'NoneType': PyNoneStructPtr,
                'frame': PyFrameObjectPtr,
                'set': PySetObjectPtr,
                'frozenset': PySetObjectPtr,
                'builtin_function_or_method': PyCFunctionObjectPtr,
                }
    if tp_name in name_map:
        return name_map[tp_name]

    if tp_flags & Py_TPFLAGS_HEAPTYPE:
        return HeapTypeObjectPtr
    if tp_flags & Py_TPFLAGS_INT_SUBCLASS:
        return PyIntObjectPtr
    if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
        return PyLongObjectPtr
    if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
        return PyListObjectPtr
    if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
        return PyTupleObjectPtr
    if tp_flags & Py_TPFLAGS_STRING_SUBCLASS:
        return PyStringObjectPtr
    if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
        return PyUnicodeObjectPtr
    if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
        return PyDictObjectPtr
    if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
        return PyBaseExceptionObjectPtr
    #if tp_flags & Py_TPFLAGS_TYPE_SUBCLASS:
    #    return PyTypeObjectPtr

    # Use the base class:
    return cls
|
[
"def",
"subclass_from_type",
"(",
"cls",
",",
"t",
")",
":",
"try",
":",
"tp_name",
"=",
"t",
".",
"field",
"(",
"'tp_name'",
")",
".",
"string",
"(",
")",
"tp_flags",
"=",
"int",
"(",
"t",
".",
"field",
"(",
"'tp_flags'",
")",
")",
"except",
"RuntimeError",
":",
"# Handle any kind of error e.g. NULL ptrs by simply using the base",
"# class",
"return",
"cls",
"#print 'tp_flags = 0x%08x' % tp_flags",
"#print 'tp_name = %r' % tp_name",
"name_map",
"=",
"{",
"'bool'",
":",
"PyBoolObjectPtr",
",",
"'classobj'",
":",
"PyClassObjectPtr",
",",
"'instance'",
":",
"PyInstanceObjectPtr",
",",
"'NoneType'",
":",
"PyNoneStructPtr",
",",
"'frame'",
":",
"PyFrameObjectPtr",
",",
"'set'",
":",
"PySetObjectPtr",
",",
"'frozenset'",
":",
"PySetObjectPtr",
",",
"'builtin_function_or_method'",
":",
"PyCFunctionObjectPtr",
",",
"}",
"if",
"tp_name",
"in",
"name_map",
":",
"return",
"name_map",
"[",
"tp_name",
"]",
"if",
"tp_flags",
"&",
"Py_TPFLAGS_HEAPTYPE",
":",
"return",
"HeapTypeObjectPtr",
"if",
"tp_flags",
"&",
"Py_TPFLAGS_INT_SUBCLASS",
":",
"return",
"PyIntObjectPtr",
"if",
"tp_flags",
"&",
"Py_TPFLAGS_LONG_SUBCLASS",
":",
"return",
"PyLongObjectPtr",
"if",
"tp_flags",
"&",
"Py_TPFLAGS_LIST_SUBCLASS",
":",
"return",
"PyListObjectPtr",
"if",
"tp_flags",
"&",
"Py_TPFLAGS_TUPLE_SUBCLASS",
":",
"return",
"PyTupleObjectPtr",
"if",
"tp_flags",
"&",
"Py_TPFLAGS_STRING_SUBCLASS",
":",
"return",
"PyStringObjectPtr",
"if",
"tp_flags",
"&",
"Py_TPFLAGS_UNICODE_SUBCLASS",
":",
"return",
"PyUnicodeObjectPtr",
"if",
"tp_flags",
"&",
"Py_TPFLAGS_DICT_SUBCLASS",
":",
"return",
"PyDictObjectPtr",
"if",
"tp_flags",
"&",
"Py_TPFLAGS_BASE_EXC_SUBCLASS",
":",
"return",
"PyBaseExceptionObjectPtr",
"#if tp_flags & Py_TPFLAGS_TYPE_SUBCLASS:",
"# return PyTypeObjectPtr",
"# Use the base class:",
"return",
"cls"
] |
Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
(PyTypeObject*), determine the corresponding subclass of PyObjectPtr
to use
Ideally, we would look up the symbols for the global types, but that
isn't working yet:
(gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
Traceback (most recent call last):
File "<string>", line 1, in <module>
NotImplementedError: Symbol type not yet supported in Python scripts.
Error while executing Python code.
For now, we use tp_flags, after doing some string comparisons on the
tp_name for some special-cases that don't seem to be visible through
flags
|
[
"Given",
"a",
"PyTypeObjectPtr",
"instance",
"wrapping",
"a",
"gdb",
".",
"Value",
"that",
"s",
"a",
"(",
"PyTypeObject",
"*",
")",
"determine",
"the",
"corresponding",
"subclass",
"of",
"PyObjectPtr",
"to",
"use"
] |
python
|
train
|
9wfox/tornadoweb
|
tornadoweb/app.py
|
https://github.com/9wfox/tornadoweb/blob/2286b66fbe10e4d9f212b979664c15fa17adf378/tornadoweb/app.py#L87-L101
|
def _run_server(self):
    """
    Start the HTTP Server
    """
    try:
        if __conf__.DEBUG:
            self._webapp.listen(self._port)
        else:
            server = HTTPServer(self._webapp)
            server.bind(self._port)
            server.start(0)
        IOLoop.current().start()
    except KeyboardInterrupt:
        print("exit ...")
|
[
"def",
"_run_server",
"(",
"self",
")",
":",
"try",
":",
"if",
"__conf__",
".",
"DEBUG",
":",
"self",
".",
"_webapp",
".",
"listen",
"(",
"self",
".",
"_port",
")",
"else",
":",
"server",
"=",
"HTTPServer",
"(",
"self",
".",
"_webapp",
")",
"server",
".",
"bind",
"(",
"self",
".",
"_port",
")",
"server",
".",
"start",
"(",
"0",
")",
"IOLoop",
".",
"current",
"(",
")",
".",
"start",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"\"exit ...\"",
")"
] |
Start the HTTP Server
|
[
"启动",
"HTTP",
"Server"
] |
python
|
train
|
3ll3d00d/vibe
|
backend/src/core/reactor.py
|
https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/core/reactor.py#L23-L34
|
def _accept(self):
    """
    Work loop runs forever (or until running is False)

    :return:
    """
    logger.warning("Reactor " + self._name + " is starting")
    while self.running:
        try:
            self._completeTask()
        except Exception:
            # Log and keep the loop alive; a failed task must not kill the reactor.
            logger.exception("Unexpected exception during request processing")
    logger.warning("Reactor " + self._name + " is terminating")
|
[
"def",
"_accept",
"(",
"self",
")",
":",
"logger",
".",
"warning",
"(",
"\"Reactor \"",
"+",
"self",
".",
"_name",
"+",
"\" is starting\"",
")",
"while",
"self",
".",
"running",
":",
"try",
":",
"self",
".",
"_completeTask",
"(",
")",
"except",
":",
"logger",
".",
"exception",
"(",
"\"Unexpected exception during request processing\"",
")",
"logger",
".",
"warning",
"(",
"\"Reactor \"",
"+",
"self",
".",
"_name",
"+",
"\" is terminating\"",
")"
] |
Work loop runs forever (or until running is False)
:return:
|
[
"Work",
"loop",
"runs",
"forever",
"(",
"or",
"until",
"running",
"is",
"False",
")",
":",
"return",
":"
] |
python
|
train
|
klen/makesite
|
makesite/install.py
|
https://github.com/klen/makesite/blob/f6f77a43a04a256189e8fffbeac1ffd63f35a10c/makesite/install.py#L46-L90
|
def clone_source(self):
    " Clone source and prepare templates "
    print_header('Clone src: %s' % self.src, '-')

    # Get source
    source_dir = self._get_source()

    # Append settings from source
    self.read(op.join(source_dir, settings.CFGNAME))
    self.templates += (self.args.template or self.template).split(',')
    self.templates = OrderedSet(self._gen_templates(self.templates))
    self['template'] = ','.join(str(x[0]) for x in self.templates)

    print_header('Deploy templates: %s' % self.template, sep='-')
    with open(op.join(self.deploy_dir, settings.TPLNAME), 'w') as f:
        f.write(self.template)
    with open(op.join(self.deploy_dir, settings.CFGNAME), 'w') as f:
        self['deploy_dir'], tmp_dir = self.target_dir, self.deploy_dir
        self.write(f)
        self['deploy_dir'] = tmp_dir

    # Create site
    site = Site(self.deploy_dir)

    # Prepare templates
    for template_name, template in self.templates:
        site.paste_template(template_name, template, tmp_dir)

    # Only show project info if requested
    if self.args.info:
        print_header('Project context', sep='-')
        LOGGER.debug(site.get_info(full=True))
        return None

    # Check requirements
    call('sudo chmod +x %s/*.sh' % self.service_dir)
    site.run_check(service_dir=self.service_dir)

    # Save options
    site.write()
    return site
|
[
"def",
"clone_source",
"(",
"self",
")",
":",
"print_header",
"(",
"'Clone src: %s'",
"%",
"self",
".",
"src",
",",
"'-'",
")",
"# Get source",
"source_dir",
"=",
"self",
".",
"_get_source",
"(",
")",
"# Append settings from source",
"self",
".",
"read",
"(",
"op",
".",
"join",
"(",
"source_dir",
",",
"settings",
".",
"CFGNAME",
")",
")",
"self",
".",
"templates",
"+=",
"(",
"self",
".",
"args",
".",
"template",
"or",
"self",
".",
"template",
")",
".",
"split",
"(",
"','",
")",
"self",
".",
"templates",
"=",
"OrderedSet",
"(",
"self",
".",
"_gen_templates",
"(",
"self",
".",
"templates",
")",
")",
"self",
"[",
"'template'",
"]",
"=",
"','",
".",
"join",
"(",
"str",
"(",
"x",
"[",
"0",
"]",
")",
"for",
"x",
"in",
"self",
".",
"templates",
")",
"print_header",
"(",
"'Deploy templates: %s'",
"%",
"self",
".",
"template",
",",
"sep",
"=",
"'-'",
")",
"with",
"open",
"(",
"op",
".",
"join",
"(",
"self",
".",
"deploy_dir",
",",
"settings",
".",
"TPLNAME",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"template",
")",
"with",
"open",
"(",
"op",
".",
"join",
"(",
"self",
".",
"deploy_dir",
",",
"settings",
".",
"CFGNAME",
")",
",",
"'w'",
")",
"as",
"f",
":",
"self",
"[",
"'deploy_dir'",
"]",
",",
"tmp_dir",
"=",
"self",
".",
"target_dir",
",",
"self",
".",
"deploy_dir",
"self",
".",
"write",
"(",
"f",
")",
"self",
"[",
"'deploy_dir'",
"]",
"=",
"tmp_dir",
"# Create site",
"site",
"=",
"Site",
"(",
"self",
".",
"deploy_dir",
")",
"# Prepare templates",
"for",
"template_name",
",",
"template",
"in",
"self",
".",
"templates",
":",
"site",
".",
"paste_template",
"(",
"template_name",
",",
"template",
",",
"tmp_dir",
")",
"# Create site",
"if",
"self",
".",
"args",
".",
"info",
":",
"print_header",
"(",
"'Project context'",
",",
"sep",
"=",
"'-'",
")",
"LOGGER",
".",
"debug",
"(",
"site",
".",
"get_info",
"(",
"full",
"=",
"True",
")",
")",
"return",
"None",
"# Check requirements",
"call",
"(",
"'sudo chmod +x %s/*.sh'",
"%",
"self",
".",
"service_dir",
")",
"site",
".",
"run_check",
"(",
"service_dir",
"=",
"self",
".",
"service_dir",
")",
"# Save options",
"site",
".",
"write",
"(",
")",
"return",
"site"
] |
Clone source and prepare templates
|
[
"Clone",
"source",
"and",
"prepare",
"templates"
] |
python
|
train
|
quintusdias/glymur
|
glymur/jp2k.py
|
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/jp2k.py#L1850-L1865
|
def _validate_label(self, boxes):
"""
Label boxes can only be inside association, codestream headers, or
compositing layer header boxes.
"""
for box in boxes:
if box.box_id != 'asoc':
if hasattr(box, 'box'):
for boxi in box.box:
if boxi.box_id == 'lbl ':
msg = ("A label box cannot be nested inside a "
"{0} box.")
msg = msg.format(box.box_id)
raise IOError(msg)
# Same set of checks on any child boxes.
self._validate_label(box.box)
|
[
"def",
"_validate_label",
"(",
"self",
",",
"boxes",
")",
":",
"for",
"box",
"in",
"boxes",
":",
"if",
"box",
".",
"box_id",
"!=",
"'asoc'",
":",
"if",
"hasattr",
"(",
"box",
",",
"'box'",
")",
":",
"for",
"boxi",
"in",
"box",
".",
"box",
":",
"if",
"boxi",
".",
"box_id",
"==",
"'lbl '",
":",
"msg",
"=",
"(",
"\"A label box cannot be nested inside a \"",
"\"{0} box.\"",
")",
"msg",
"=",
"msg",
".",
"format",
"(",
"box",
".",
"box_id",
")",
"raise",
"IOError",
"(",
"msg",
")",
"# Same set of checks on any child boxes.",
"self",
".",
"_validate_label",
"(",
"box",
".",
"box",
")"
] |
Label boxes can only be inside association, codestream headers, or
compositing layer header boxes.
|
[
"Label",
"boxes",
"can",
"only",
"be",
"inside",
"association",
"codestream",
"headers",
"or",
"compositing",
"layer",
"header",
"boxes",
"."
] |
python
|
train
|
zetaops/zengine
|
zengine/views/task_manager_actions.py
|
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/task_manager_actions.py#L65-L90
|
def select_role(self):
"""
The workflow method to be assigned to the person with the same role and unit as the user.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
"""
roles = [(m.key, m.__unicode__()) for m in RoleModel.objects.filter(
abstract_role=self.current.role.abstract_role,
unit=self.current.role.unit) if m != self.current.role]
if roles:
_form = forms.JsonForm(title=_(u'Assign to workflow'))
_form.select_role = fields.Integer(_(u"Chose Role"), choices=roles)
_form.explain_text = fields.String(_(u"Explain Text"), required=False)
_form.send_button = fields.Button(_(u"Send"))
self.form_out(_form)
else:
title = _(u"Unsuccessful")
msg = _(u"Assign role not found")
self.current.msg_box(title=title, msg=msg)
|
[
"def",
"select_role",
"(",
"self",
")",
":",
"roles",
"=",
"[",
"(",
"m",
".",
"key",
",",
"m",
".",
"__unicode__",
"(",
")",
")",
"for",
"m",
"in",
"RoleModel",
".",
"objects",
".",
"filter",
"(",
"abstract_role",
"=",
"self",
".",
"current",
".",
"role",
".",
"abstract_role",
",",
"unit",
"=",
"self",
".",
"current",
".",
"role",
".",
"unit",
")",
"if",
"m",
"!=",
"self",
".",
"current",
".",
"role",
"]",
"if",
"roles",
":",
"_form",
"=",
"forms",
".",
"JsonForm",
"(",
"title",
"=",
"_",
"(",
"u'Assign to workflow'",
")",
")",
"_form",
".",
"select_role",
"=",
"fields",
".",
"Integer",
"(",
"_",
"(",
"u\"Chose Role\"",
")",
",",
"choices",
"=",
"roles",
")",
"_form",
".",
"explain_text",
"=",
"fields",
".",
"String",
"(",
"_",
"(",
"u\"Explain Text\"",
")",
",",
"required",
"=",
"False",
")",
"_form",
".",
"send_button",
"=",
"fields",
".",
"Button",
"(",
"_",
"(",
"u\"Send\"",
")",
")",
"self",
".",
"form_out",
"(",
"_form",
")",
"else",
":",
"title",
"=",
"_",
"(",
"u\"Unsuccessful\"",
")",
"msg",
"=",
"_",
"(",
"u\"Assign role not found\"",
")",
"self",
".",
"current",
".",
"msg_box",
"(",
"title",
"=",
"title",
",",
"msg",
"=",
"msg",
")"
] |
The workflow method to be assigned to the person with the same role and unit as the user.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
|
[
"The",
"workflow",
"method",
"to",
"be",
"assigned",
"to",
"the",
"person",
"with",
"the",
"same",
"role",
"and",
"unit",
"as",
"the",
"user",
".",
"..",
"code",
"-",
"block",
"::",
"python"
] |
python
|
train
|
pantsbuild/pex
|
pex/translator.py
|
https://github.com/pantsbuild/pex/blob/87b2129d860250d3b9edce75b9cb62f9789ee521/pex/translator.py#L133-L146
|
def translate(self, package, into=None):
"""From a binary package, translate to a local binary distribution."""
if not package.local:
raise ValueError('BinaryTranslator cannot translate remote packages.')
if not isinstance(package, self._package_type):
return None
if not package.compatible(self._supported_tags):
TRACER.log('Target package %s is not compatible with %s' % (
package, self._supported_tags))
return None
into = into or safe_mkdtemp()
target_path = os.path.join(into, package.filename)
safe_copy(package.local_path, target_path)
return DistributionHelper.distribution_from_path(target_path)
|
[
"def",
"translate",
"(",
"self",
",",
"package",
",",
"into",
"=",
"None",
")",
":",
"if",
"not",
"package",
".",
"local",
":",
"raise",
"ValueError",
"(",
"'BinaryTranslator cannot translate remote packages.'",
")",
"if",
"not",
"isinstance",
"(",
"package",
",",
"self",
".",
"_package_type",
")",
":",
"return",
"None",
"if",
"not",
"package",
".",
"compatible",
"(",
"self",
".",
"_supported_tags",
")",
":",
"TRACER",
".",
"log",
"(",
"'Target package %s is not compatible with %s'",
"%",
"(",
"package",
",",
"self",
".",
"_supported_tags",
")",
")",
"return",
"None",
"into",
"=",
"into",
"or",
"safe_mkdtemp",
"(",
")",
"target_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"into",
",",
"package",
".",
"filename",
")",
"safe_copy",
"(",
"package",
".",
"local_path",
",",
"target_path",
")",
"return",
"DistributionHelper",
".",
"distribution_from_path",
"(",
"target_path",
")"
] |
From a binary package, translate to a local binary distribution.
|
[
"From",
"a",
"binary",
"package",
"translate",
"to",
"a",
"local",
"binary",
"distribution",
"."
] |
python
|
train
|
zhmcclient/python-zhmcclient
|
zhmcclient_mock/_urihandler.py
|
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L929-L940
|
def get(method, hmc, uri, uri_parms, logon_required):
"""Operation: List CPCs."""
query_str = uri_parms[0]
result_cpcs = []
filter_args = parse_query_parms(method, uri, query_str)
for cpc in hmc.cpcs.list(filter_args):
result_cpc = {}
for prop in cpc.properties:
if prop in ('object-uri', 'name', 'status'):
result_cpc[prop] = cpc.properties[prop]
result_cpcs.append(result_cpc)
return {'cpcs': result_cpcs}
|
[
"def",
"get",
"(",
"method",
",",
"hmc",
",",
"uri",
",",
"uri_parms",
",",
"logon_required",
")",
":",
"query_str",
"=",
"uri_parms",
"[",
"0",
"]",
"result_cpcs",
"=",
"[",
"]",
"filter_args",
"=",
"parse_query_parms",
"(",
"method",
",",
"uri",
",",
"query_str",
")",
"for",
"cpc",
"in",
"hmc",
".",
"cpcs",
".",
"list",
"(",
"filter_args",
")",
":",
"result_cpc",
"=",
"{",
"}",
"for",
"prop",
"in",
"cpc",
".",
"properties",
":",
"if",
"prop",
"in",
"(",
"'object-uri'",
",",
"'name'",
",",
"'status'",
")",
":",
"result_cpc",
"[",
"prop",
"]",
"=",
"cpc",
".",
"properties",
"[",
"prop",
"]",
"result_cpcs",
".",
"append",
"(",
"result_cpc",
")",
"return",
"{",
"'cpcs'",
":",
"result_cpcs",
"}"
] |
Operation: List CPCs.
|
[
"Operation",
":",
"List",
"CPCs",
"."
] |
python
|
train
|
yyuu/botornado
|
boto/dynamodb/layer2.py
|
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/dynamodb/layer2.py#L593-L684
|
def query(self, table, hash_key, range_key_condition=None,
attributes_to_get=None, request_limit=None,
max_results=None, consistent_read=False,
scan_index_forward=True, exclusive_start_key=None,
item_class=Item):
"""
Perform a query on the table.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object that is being queried.
:type hash_key: int|long|float|str|unicode
:param hash_key: The HashKey of the requested item. The
type of the value must match the type defined in the
schema for the table.
:type range_key_condition: dict
:param range_key_condition: A dict where the key is either
a scalar value appropriate for the RangeKey in the schema
of the database or a tuple of such values. The value
associated with this key in the dict will be one of the
following conditions:
'EQ'|'LE'|'LT'|'GE'|'GT'|'BEGINS_WITH'|'BETWEEN'
The only condition which expects or will accept a tuple
of values is 'BETWEEN', otherwise a scalar value should
be used as the key in the dict.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type request_limit: int
:param request_limit: The maximum number of items to retrieve
from Amazon DynamoDB on each request. You may want to set
a specific request_limit based on the provisioned throughput
of your table. The default behavior is to retrieve as many
results as possible per request.
:type max_results: int
:param max_results: The maximum number of results that will
be retrieved from Amazon DynamoDB in total. For example,
if you only wanted to see the first 100 results from the
query, regardless of how many were actually available, you
could set max_results to 100 and the generator returned
from the query method will only yield 100 results max.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type scan_index_forward: bool
:param scan_index_forward: Specifies forward or backward
traversal of the index. Default is forward (True).
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
:rtype: generator
"""
rkc = self.dynamize_range_key_condition(range_key_condition)
response = True
n = 0
while response:
if response is True:
pass
elif response.has_key("LastEvaluatedKey"):
lek = response['LastEvaluatedKey']
exclusive_start_key = self.dynamize_last_evaluated_key(lek)
else:
break
response = self.layer1.query(table.name,
self.dynamize_value(hash_key),
rkc, attributes_to_get, request_limit,
consistent_read, scan_index_forward,
exclusive_start_key,
object_hook=item_object_hook)
for item in response['Items']:
if max_results and n == max_results:
break
yield item_class(table, attrs=item)
n += 1
|
[
"def",
"query",
"(",
"self",
",",
"table",
",",
"hash_key",
",",
"range_key_condition",
"=",
"None",
",",
"attributes_to_get",
"=",
"None",
",",
"request_limit",
"=",
"None",
",",
"max_results",
"=",
"None",
",",
"consistent_read",
"=",
"False",
",",
"scan_index_forward",
"=",
"True",
",",
"exclusive_start_key",
"=",
"None",
",",
"item_class",
"=",
"Item",
")",
":",
"rkc",
"=",
"self",
".",
"dynamize_range_key_condition",
"(",
"range_key_condition",
")",
"response",
"=",
"True",
"n",
"=",
"0",
"while",
"response",
":",
"if",
"response",
"is",
"True",
":",
"pass",
"elif",
"response",
".",
"has_key",
"(",
"\"LastEvaluatedKey\"",
")",
":",
"lek",
"=",
"response",
"[",
"'LastEvaluatedKey'",
"]",
"exclusive_start_key",
"=",
"self",
".",
"dynamize_last_evaluated_key",
"(",
"lek",
")",
"else",
":",
"break",
"response",
"=",
"self",
".",
"layer1",
".",
"query",
"(",
"table",
".",
"name",
",",
"self",
".",
"dynamize_value",
"(",
"hash_key",
")",
",",
"rkc",
",",
"attributes_to_get",
",",
"request_limit",
",",
"consistent_read",
",",
"scan_index_forward",
",",
"exclusive_start_key",
",",
"object_hook",
"=",
"item_object_hook",
")",
"for",
"item",
"in",
"response",
"[",
"'Items'",
"]",
":",
"if",
"max_results",
"and",
"n",
"==",
"max_results",
":",
"break",
"yield",
"item_class",
"(",
"table",
",",
"attrs",
"=",
"item",
")",
"n",
"+=",
"1"
] |
Perform a query on the table.
:type table: :class:`boto.dynamodb.table.Table`
:param table: The Table object that is being queried.
:type hash_key: int|long|float|str|unicode
:param hash_key: The HashKey of the requested item. The
type of the value must match the type defined in the
schema for the table.
:type range_key_condition: dict
:param range_key_condition: A dict where the key is either
a scalar value appropriate for the RangeKey in the schema
of the database or a tuple of such values. The value
associated with this key in the dict will be one of the
following conditions:
'EQ'|'LE'|'LT'|'GE'|'GT'|'BEGINS_WITH'|'BETWEEN'
The only condition which expects or will accept a tuple
of values is 'BETWEEN', otherwise a scalar value should
be used as the key in the dict.
:type attributes_to_get: list
:param attributes_to_get: A list of attribute names.
If supplied, only the specified attribute names will
be returned. Otherwise, all attributes will be returned.
:type request_limit: int
:param request_limit: The maximum number of items to retrieve
from Amazon DynamoDB on each request. You may want to set
a specific request_limit based on the provisioned throughput
of your table. The default behavior is to retrieve as many
results as possible per request.
:type max_results: int
:param max_results: The maximum number of results that will
be retrieved from Amazon DynamoDB in total. For example,
if you only wanted to see the first 100 results from the
query, regardless of how many were actually available, you
could set max_results to 100 and the generator returned
from the query method will only yield 100 results max.
:type consistent_read: bool
:param consistent_read: If True, a consistent read
request is issued. Otherwise, an eventually consistent
request is issued.
:type scan_index_forward: bool
:param scan_index_forward: Specifies forward or backward
traversal of the index. Default is forward (True).
:type exclusive_start_key: list or tuple
:param exclusive_start_key: Primary key of the item from
which to continue an earlier query. This would be
provided as the LastEvaluatedKey in that query.
:type item_class: Class
:param item_class: Allows you to override the class used
to generate the items. This should be a subclass of
:class:`boto.dynamodb.item.Item`
:rtype: generator
|
[
"Perform",
"a",
"query",
"on",
"the",
"table",
".",
":",
"type",
"table",
":",
":",
"class",
":",
"boto",
".",
"dynamodb",
".",
"table",
".",
"Table",
":",
"param",
"table",
":",
"The",
"Table",
"object",
"that",
"is",
"being",
"queried",
".",
":",
"type",
"hash_key",
":",
"int|long|float|str|unicode",
":",
"param",
"hash_key",
":",
"The",
"HashKey",
"of",
"the",
"requested",
"item",
".",
"The",
"type",
"of",
"the",
"value",
"must",
"match",
"the",
"type",
"defined",
"in",
"the",
"schema",
"for",
"the",
"table",
"."
] |
python
|
train
|
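The pagination loop above re-issues the query with the previous response's LastEvaluatedKey until none is returned. A generic, hedged restatement of that pattern, with fetch_page as a hypothetical callable standing in for layer1.query:

def paginate(fetch_page, max_results=None):
    """Yield items across pages chained by 'LastEvaluatedKey' (sketch)."""
    exclusive_start_key = None
    yielded = 0
    while True:
        response = fetch_page(exclusive_start_key)  # hypothetical callable
        for item in response['Items']:
            if max_results is not None and yielded >= max_results:
                return
            yield item
            yielded += 1
        exclusive_start_key = response.get('LastEvaluatedKey')
        if exclusive_start_key is None:
            return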
danilobellini/audiolazy
|
examples/pi.py
|
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/examples/pi.py#L41-L46
|
def atan_mgl(x, n=10):
"""
Finds the arctan using the Madhava-Gregory-Leibniz series.
"""
acc = 1 / (1 - z ** -1) # Accumulator filter
return acc(mgl_seq(x)).skip(n-1).take()
|
[
"def",
"atan_mgl",
"(",
"x",
",",
"n",
"=",
"10",
")",
":",
"acc",
"=",
"1",
"/",
"(",
"1",
"-",
"z",
"**",
"-",
"1",
")",
"# Accumulator filter",
"return",
"acc",
"(",
"mgl_seq",
"(",
"x",
")",
")",
".",
"skip",
"(",
"n",
"-",
"1",
")",
".",
"take",
"(",
")"
] |
Finds the arctan using the Madhava-Gregory-Leibniz series.
|
[
"Finds",
"the",
"arctan",
"using",
"the",
"Madhava",
"-",
"Gregory",
"-",
"Leibniz",
"series",
"."
] |
python
|
train
|
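The series behind this example is atan(x) = x - x^3/3 + x^5/5 - ... for |x| <= 1. A plain-Python partial sum, independent of audiolazy's z-transform accumulator, can sanity-check the value via pi = 4*atan(1):

def atan_mgl_plain(x, n=100000):
    # Partial sum of the Madhava-Gregory-Leibniz series, valid for |x| <= 1
    return sum((-1) ** k * x ** (2 * k + 1) / (2 * k + 1) for k in range(n))

print(4 * atan_mgl_plain(1.0))  # converges slowly toward pi (~3.1416)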
peri-source/peri
|
peri/comp/comp.py
|
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/comp/comp.py#L88-L99
|
def get_values(self, params):
"""
Get the value of a list or single parameter.
Parameters
----------
params : string, list of string
name of parameters which to retrieve
"""
return util.delistify(
[self.param_dict[p] for p in util.listify(params)], params
)
|
[
"def",
"get_values",
"(",
"self",
",",
"params",
")",
":",
"return",
"util",
".",
"delistify",
"(",
"[",
"self",
".",
"param_dict",
"[",
"p",
"]",
"for",
"p",
"in",
"util",
".",
"listify",
"(",
"params",
")",
"]",
",",
"params",
")"
] |
Get the value of a list or single parameter.
Parameters
----------
params : string, list of string
name of parameters which to retrieve
|
[
"Get",
"the",
"value",
"of",
"a",
"list",
"or",
"single",
"parameter",
"."
] |
python
|
valid
|
RiotGames/cloud-inquisitor
|
backend/cloud_inquisitor/plugins/types/enforcements.py
|
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/types/enforcements.py#L30-L49
|
def get_all(cls, account_id=None, location=None):
""" Return all Enforcements
args:
`account_id` : Unique Account Identifier
`location` : Region associated with the Resource
returns:
list of enforcement objects
"""
qry = db.Enforcements.filter()
if account_id:
qry = qry.filter(account_id == Enforcements.account_id)
if location:
qry = qry.join(Resource, Resource.location == location)
return qry
|
[
"def",
"get_all",
"(",
"cls",
",",
"account_id",
"=",
"None",
",",
"location",
"=",
"None",
")",
":",
"qry",
"=",
"db",
".",
"Enforcements",
".",
"filter",
"(",
")",
"if",
"account_id",
":",
"qry",
"=",
"qry",
".",
"filter",
"(",
"account_id",
"==",
"Enforcements",
".",
"account_id",
")",
"if",
"location",
":",
"qry",
"=",
"qry",
".",
"join",
"(",
"Resource",
",",
"Resource",
".",
"location",
"==",
"location",
")",
"return",
"qry"
] |
Return all Enforcements
args:
`account_id` : Unique Account Identifier
`location` : Region associated with the Resource
returns:
list of enforcement objects
|
[
"Return",
"all",
"Enforcements"
] |
python
|
train
|
PyMySQL/Tornado-MySQL
|
tornado_mysql/connections.py
|
https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/connections.py#L588-L594
|
def close(self):
"""Close the socket without sending quit message."""
stream = self._stream
if stream is None:
return
self._stream = None
stream.close()
|
[
"def",
"close",
"(",
"self",
")",
":",
"stream",
"=",
"self",
".",
"_stream",
"if",
"stream",
"is",
"None",
":",
"return",
"self",
".",
"_stream",
"=",
"None",
"stream",
".",
"close",
"(",
")"
] |
Close the socket without sending quit message.
|
[
"Close",
"the",
"socket",
"without",
"sending",
"quit",
"message",
"."
] |
python
|
train
|
chaoss/grimoirelab-sirmordred
|
sirmordred/task_enrich.py
|
https://github.com/chaoss/grimoirelab-sirmordred/blob/d6ac94d28d707fae23170064d078f1edf937d13e/sirmordred/task_enrich.py#L268-L302
|
def __autorefresh_studies(self, cfg):
"""Execute autorefresh for areas of code study if configured"""
if 'studies' not in self.conf[self.backend_section] or \
'enrich_areas_of_code:git' not in self.conf[self.backend_section]['studies']:
logger.debug("Not doing autorefresh for studies, Areas of Code study is not active.")
return
aoc_index = self.conf['enrich_areas_of_code:git'].get('out_index', GitEnrich.GIT_AOC_ENRICHED)
# if `out_index` exists but has no value, use default
if not aoc_index:
aoc_index = GitEnrich.GIT_AOC_ENRICHED
logger.debug("Autorefresh for Areas of Code study index: %s", aoc_index)
es = Elasticsearch([self.conf['es_enrichment']['url']], timeout=100,
verify_certs=self._get_enrich_backend().elastic.requests.verify)
if not es.indices.exists(index=aoc_index):
logger.debug("Not doing autorefresh, index doesn't exist for Areas of Code study")
return
logger.debug("Doing autorefresh for Areas of Code study")
# Create a GitEnrich backend tweaked to work with AOC index
aoc_backend = GitEnrich(self.db_sh, None, cfg['projects']['projects_file'],
self.db_user, self.db_password, self.db_host)
aoc_backend.mapping = None
aoc_backend.roles = ['author']
elastic_enrich = get_elastic(self.conf['es_enrichment']['url'],
aoc_index, clean=False, backend=aoc_backend)
aoc_backend.set_elastic(elastic_enrich)
self.__autorefresh(aoc_backend, studies=True)
|
[
"def",
"__autorefresh_studies",
"(",
"self",
",",
"cfg",
")",
":",
"if",
"'studies'",
"not",
"in",
"self",
".",
"conf",
"[",
"self",
".",
"backend_section",
"]",
"or",
"'enrich_areas_of_code:git'",
"not",
"in",
"self",
".",
"conf",
"[",
"self",
".",
"backend_section",
"]",
"[",
"'studies'",
"]",
":",
"logger",
".",
"debug",
"(",
"\"Not doing autorefresh for studies, Areas of Code study is not active.\"",
")",
"return",
"aoc_index",
"=",
"self",
".",
"conf",
"[",
"'enrich_areas_of_code:git'",
"]",
".",
"get",
"(",
"'out_index'",
",",
"GitEnrich",
".",
"GIT_AOC_ENRICHED",
")",
"# if `out_index` exists but has no value, use default",
"if",
"not",
"aoc_index",
":",
"aoc_index",
"=",
"GitEnrich",
".",
"GIT_AOC_ENRICHED",
"logger",
".",
"debug",
"(",
"\"Autorefresh for Areas of Code study index: %s\"",
",",
"aoc_index",
")",
"es",
"=",
"Elasticsearch",
"(",
"[",
"self",
".",
"conf",
"[",
"'es_enrichment'",
"]",
"[",
"'url'",
"]",
"]",
",",
"timeout",
"=",
"100",
",",
"verify_certs",
"=",
"self",
".",
"_get_enrich_backend",
"(",
")",
".",
"elastic",
".",
"requests",
".",
"verify",
")",
"if",
"not",
"es",
".",
"indices",
".",
"exists",
"(",
"index",
"=",
"aoc_index",
")",
":",
"logger",
".",
"debug",
"(",
"\"Not doing autorefresh, index doesn't exist for Areas of Code study\"",
")",
"return",
"logger",
".",
"debug",
"(",
"\"Doing autorefresh for Areas of Code study\"",
")",
"# Create a GitEnrich backend tweaked to work with AOC index",
"aoc_backend",
"=",
"GitEnrich",
"(",
"self",
".",
"db_sh",
",",
"None",
",",
"cfg",
"[",
"'projects'",
"]",
"[",
"'projects_file'",
"]",
",",
"self",
".",
"db_user",
",",
"self",
".",
"db_password",
",",
"self",
".",
"db_host",
")",
"aoc_backend",
".",
"mapping",
"=",
"None",
"aoc_backend",
".",
"roles",
"=",
"[",
"'author'",
"]",
"elastic_enrich",
"=",
"get_elastic",
"(",
"self",
".",
"conf",
"[",
"'es_enrichment'",
"]",
"[",
"'url'",
"]",
",",
"aoc_index",
",",
"clean",
"=",
"False",
",",
"backend",
"=",
"aoc_backend",
")",
"aoc_backend",
".",
"set_elastic",
"(",
"elastic_enrich",
")",
"self",
".",
"__autorefresh",
"(",
"aoc_backend",
",",
"studies",
"=",
"True",
")"
] |
Execute autorefresh for areas of code study if configured
|
[
"Execute",
"autorefresh",
"for",
"areas",
"of",
"code",
"study",
"if",
"configured"
] |
python
|
valid
|
bids-standard/pybids
|
bids/variables/variables.py
|
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/variables/variables.py#L432-L469
|
def resample(self, sampling_rate, inplace=False, kind='linear'):
'''Resample the Variable to the specified sampling rate.
Parameters
----------
sampling_rate : :obj:`int`, :obj:`float`
Target sampling rate (in Hz).
inplace : :obj:`bool`, optional
If True, performs resampling in-place. If False, returns a resampled
copy of the current Variable. Default is False.
kind : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
Argument to pass to :obj:`scipy.interpolate.interp1d`; indicates
the kind of interpolation approach to use. See interp1d docs for
valid values. Default is 'linear'.
'''
if not inplace:
var = self.clone()
var.resample(sampling_rate, True, kind)
return var
if sampling_rate == self.sampling_rate:
return
old_sr = self.sampling_rate
n = len(self.index)
self.index = self._build_entity_index(self.run_info, sampling_rate)
x = np.arange(n)
num = len(self.index)
from scipy.interpolate import interp1d
f = interp1d(x, self.values.values.ravel(), kind=kind)
x_new = np.linspace(0, n - 1, num=num)
self.values = pd.DataFrame(f(x_new))
assert len(self.values) == len(self.index)
self.sampling_rate = sampling_rate
|
[
"def",
"resample",
"(",
"self",
",",
"sampling_rate",
",",
"inplace",
"=",
"False",
",",
"kind",
"=",
"'linear'",
")",
":",
"if",
"not",
"inplace",
":",
"var",
"=",
"self",
".",
"clone",
"(",
")",
"var",
".",
"resample",
"(",
"sampling_rate",
",",
"True",
",",
"kind",
")",
"return",
"var",
"if",
"sampling_rate",
"==",
"self",
".",
"sampling_rate",
":",
"return",
"old_sr",
"=",
"self",
".",
"sampling_rate",
"n",
"=",
"len",
"(",
"self",
".",
"index",
")",
"self",
".",
"index",
"=",
"self",
".",
"_build_entity_index",
"(",
"self",
".",
"run_info",
",",
"sampling_rate",
")",
"x",
"=",
"np",
".",
"arange",
"(",
"n",
")",
"num",
"=",
"len",
"(",
"self",
".",
"index",
")",
"from",
"scipy",
".",
"interpolate",
"import",
"interp1d",
"f",
"=",
"interp1d",
"(",
"x",
",",
"self",
".",
"values",
".",
"values",
".",
"ravel",
"(",
")",
",",
"kind",
"=",
"kind",
")",
"x_new",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"n",
"-",
"1",
",",
"num",
"=",
"num",
")",
"self",
".",
"values",
"=",
"pd",
".",
"DataFrame",
"(",
"f",
"(",
"x_new",
")",
")",
"assert",
"len",
"(",
"self",
".",
"values",
")",
"==",
"len",
"(",
"self",
".",
"index",
")",
"self",
".",
"sampling_rate",
"=",
"sampling_rate"
] |
Resample the Variable to the specified sampling rate.
Parameters
----------
sampling_rate : :obj:`int`, :obj:`float`
Target sampling rate (in Hz).
inplace : :obj:`bool`, optional
If True, performs resampling in-place. If False, returns a resampled
copy of the current Variable. Default is False.
kind : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
Argument to pass to :obj:`scipy.interpolate.interp1d`; indicates
the kind of interpolation approach to use. See interp1d docs for
valid values. Default is 'linear'.
|
[
"Resample",
"the",
"Variable",
"to",
"the",
"specified",
"sampling",
"rate",
"."
] |
python
|
train
|
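The core of the resampling step is: index the old samples 0..n-1, fit scipy's interp1d over them, and evaluate it on a denser linspace spanning the same range. A standalone sketch with made-up sample values:

import numpy as np
from scipy.interpolate import interp1d

values = np.array([0.0, 1.0, 4.0, 9.0, 16.0])  # five made-up samples
old_n = len(values)
new_n = 10                                     # target length after resampling

f = interp1d(np.arange(old_n), values, kind='linear')
resampled = f(np.linspace(0, old_n - 1, num=new_n))
print(resampled.shape)  # (10,)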
blazelibs/blazeutils
|
blazeutils/decorators.py
|
https://github.com/blazelibs/blazeutils/blob/c94476325146007553cbddeeb9ef83394756babf/blazeutils/decorators.py#L106-L130
|
def _num_required_args(func):
""" Number of args for func
>>> def foo(a, b, c=None):
... return a + b + c
>>> _num_required_args(foo)
2
>>> def bar(*args):
... return sum(args)
>>> print(_num_required_args(bar))
None
borrowed from: https://github.com/pytoolz/toolz
"""
try:
spec = inspect.getargspec(func)
if spec.varargs:
return None
num_defaults = len(spec.defaults) if spec.defaults else 0
return len(spec.args) - num_defaults
except TypeError:
return None
|
[
"def",
"_num_required_args",
"(",
"func",
")",
":",
"try",
":",
"spec",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"if",
"spec",
".",
"varargs",
":",
"return",
"None",
"num_defaults",
"=",
"len",
"(",
"spec",
".",
"defaults",
")",
"if",
"spec",
".",
"defaults",
"else",
"0",
"return",
"len",
"(",
"spec",
".",
"args",
")",
"-",
"num_defaults",
"except",
"TypeError",
":",
"return",
"None"
] |
Number of args for func
>>> def foo(a, b, c=None):
... return a + b + c
>>> _num_required_args(foo)
2
>>> def bar(*args):
... return sum(args)
>>> print(_num_required_args(bar))
None
borrowed from: https://github.com/pytoolz/toolz
|
[
"Number",
"of",
"args",
"for",
"func"
] |
python
|
train
|
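inspect.getargspec, used above, was removed in Python 3.11. A hedged equivalent built on inspect.signature, returning None when *args makes the count indeterminate:

import inspect

def num_required_args(func):
    """Count parameters without defaults; None if *args is accepted (sketch)."""
    try:
        params = inspect.signature(func).parameters.values()
    except (TypeError, ValueError):
        return None
    if any(p.kind is inspect.Parameter.VAR_POSITIONAL for p in params):
        return None
    positional = (inspect.Parameter.POSITIONAL_ONLY,
                  inspect.Parameter.POSITIONAL_OR_KEYWORD)
    return sum(1 for p in params
               if p.kind in positional and p.default is inspect.Parameter.empty)

def foo(a, b, c=None):
    return a, b, c

print(num_required_args(foo))  # 2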
volafiled/python-volapi
|
setup.py
|
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/setup.py#L22-L34
|
def find_version(filename):
"""
Search for assignment of __version__ string in given file and
return what it is assigned to.
"""
with open(filename, "r") as filep:
version_file = filep.read()
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M
)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
|
[
"def",
"find_version",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"filep",
":",
"version_file",
"=",
"filep",
".",
"read",
"(",
")",
"version_match",
"=",
"re",
".",
"search",
"(",
"r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"",
",",
"version_file",
",",
"re",
".",
"M",
")",
"if",
"version_match",
":",
"return",
"version_match",
".",
"group",
"(",
"1",
")",
"raise",
"RuntimeError",
"(",
"\"Unable to find version string.\"",
")"
] |
Search for assignment of __version__ string in given file and
return what it is assigned to.
|
[
"Search",
"for",
"assignment",
"of",
"__version__",
"string",
"in",
"given",
"file",
"and",
"return",
"what",
"it",
"is",
"assigned",
"to",
"."
] |
python
|
train
|
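A quick check of the same regex against an in-memory string, with a made-up version number, shows what group(1) captures:

import re

sample = '__version__ = "1.2.3"\n'  # made-up file contents
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", sample, re.M)
print(match.group(1))  # 1.2.3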
baliame/http-hmac-python
|
httphmac/request.py
|
https://github.com/baliame/http-hmac-python/blob/9884c0cbfdb712f9f37080a8efbfdce82850785f/httphmac/request.py#L130-L139
|
def with_url(self, url):
"""Sets the request's URL and returns the request itself.
Automatically sets the Host header according to the URL.
Keyword arguments:
url -- a string representing the URL to set for the request
"""
self.url = URL(url)
self.header["Host"] = self.url.host
return self
|
[
"def",
"with_url",
"(",
"self",
",",
"url",
")",
":",
"self",
".",
"url",
"=",
"URL",
"(",
"url",
")",
"self",
".",
"header",
"[",
"\"Host\"",
"]",
"=",
"self",
".",
"url",
".",
"host",
"return",
"self"
] |
Sets the request's URL and returns the request itself.
Automatically sets the Host header according to the URL.
Keyword arguments:
url -- a string representing the URL to set for the request
|
[
"Sets",
"the",
"request",
"s",
"URL",
"and",
"returns",
"the",
"request",
"itself",
".",
"Automatically",
"sets",
"the",
"Host",
"header",
"according",
"to",
"the",
"URL",
"."
] |
python
|
train
|
aestrivex/bctpy
|
bct/algorithms/reference.py
|
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/reference.py#L689-L718
|
def makerandCIJ_dir(n, k, seed=None):
'''
This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
rng = get_rng(seed)
ix, = np.where(np.logical_not(np.eye(n)).flat)
rp = rng.permutation(np.size(ix))
CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ
|
[
"def",
"makerandCIJ_dir",
"(",
"n",
",",
"k",
",",
"seed",
"=",
"None",
")",
":",
"rng",
"=",
"get_rng",
"(",
"seed",
")",
"ix",
",",
"=",
"np",
".",
"where",
"(",
"np",
".",
"logical_not",
"(",
"np",
".",
"eye",
"(",
"n",
")",
")",
".",
"flat",
")",
"rp",
"=",
"rng",
".",
"permutation",
"(",
"np",
".",
"size",
"(",
"ix",
")",
")",
"CIJ",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"n",
")",
")",
"CIJ",
".",
"flat",
"[",
"ix",
"[",
"rp",
"]",
"[",
":",
"k",
"]",
"]",
"=",
"1",
"return",
"CIJ"
] |
This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal.
|
[
"This",
"function",
"generates",
"a",
"directed",
"random",
"network"
] |
python
|
train
|
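The trick in this generator is to permute the flat indices of the off-diagonal cells and switch on the first k of them. A compact restatement with NumPy's newer default_rng API (an assumption; the source wraps its own get_rng helper):

import numpy as np

def rand_dir_cij(n, k, seed=None):
    rng = np.random.default_rng(seed)
    off_diag = np.flatnonzero(~np.eye(n, dtype=bool))  # flat indices off the diagonal
    chosen = rng.permutation(off_diag)[:k]
    cij = np.zeros((n, n))
    cij.flat[chosen] = 1
    return cij

m = rand_dir_cij(5, 7, seed=0)
print(int(m.sum()), float(m.diagonal().sum()))  # 7 edges, diagonal stays 0.0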
cohorte/cohorte-herald
|
python/herald/transports/peer_contact.py
|
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/peer_contact.py#L68-L86
|
def __load_dump(self, message):
"""
Calls the hook method to modify the loaded peer description before
giving it to the directory
:param message: The received Herald message
:return: The updated peer description
"""
dump = message.content
if self._hook is not None:
# Call the hook
try:
updated_dump = self._hook(message, dump)
if updated_dump is not None:
# Use the new description
dump = updated_dump
except (TypeError, ValueError) as ex:
self._logger("Invalid description hook: %s", ex)
return dump
|
[
"def",
"__load_dump",
"(",
"self",
",",
"message",
")",
":",
"dump",
"=",
"message",
".",
"content",
"if",
"self",
".",
"_hook",
"is",
"not",
"None",
":",
"# Call the hook",
"try",
":",
"updated_dump",
"=",
"self",
".",
"_hook",
"(",
"message",
",",
"dump",
")",
"if",
"updated_dump",
"is",
"not",
"None",
":",
"# Use the new description",
"dump",
"=",
"updated_dump",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
"as",
"ex",
":",
"self",
".",
"_logger",
"(",
"\"Invalid description hook: %s\"",
",",
"ex",
")",
"return",
"dump"
] |
Calls the hook method to modify the loaded peer description before
giving it to the directory
:param message: The received Herald message
:return: The updated peer description
|
[
"Calls",
"the",
"hook",
"method",
"to",
"modify",
"the",
"loaded",
"peer",
"description",
"before",
"giving",
"it",
"to",
"the",
"directory"
] |
python
|
train
|
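The hook contract here is: call it if present, keep the original dump unless the hook returns a replacement, and never let a misbehaving hook break loading. A standalone sketch of that contract, with apply_hook as a hypothetical name:

def apply_hook(hook, message, dump):
    """Return dump, possibly replaced by hook(message, dump) (sketch)."""
    if hook is None:
        return dump
    try:
        updated = hook(message, dump)
    except (TypeError, ValueError):
        return dump  # a misbehaving hook must not break loading
    return dump if updated is None else updated

print(apply_hook(lambda msg, d: None, "msg", {"peer": 1}))          # {'peer': 1}
print(apply_hook(lambda msg, d: {"peer": 2}, "msg", {"peer": 1}))   # {'peer': 2}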
cyrus-/cypy
|
cypy/__init__.py
|
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/__init__.py#L210-L252
|
def prog_iter(bounded_iterable, delta=0.01, line_size=50):
'''Wraps the provided sequence with an iterator that tracks its progress
on the console.
>>> for i in prog_iter(xrange(100)): pass
..................................................
..................................................
(0.000331163406372 s)
More specifically, the behavior is as follows:
- Produces a progress bar on stdout, at ``delta`` increments, where
``delta`` is a percentage (represented as a float from 0.0 to 1.0)
- Newline every line_size dots (defaults to 50)
- Displays the time the loop took, as in toc() (without interfering with toc)
- A prog_iter nested in another prog_iter will not produce any of these
side effects. That is, only one progress bar will ever be printing at a time.
'''
# TODO: Technically, this should have a __len__.
global _prog_iterin_loop
if not _prog_iterin_loop:
startTime = _time.time()
_prog_iterin_loop = True
length = float(len(bounded_iterable))
_sys.stdout.write(".")
dots = 1
next = delta
for i, item in enumerate(bounded_iterable):
if (i + 1) / length >= next:
next += delta
dots += 1
_sys.stdout.write(".")
if dots % line_size == 0:
_sys.stdout.write("\n")
_sys.stdout.flush()
yield item
print((" (" + str(_time.time() - startTime) + " s)"))
_prog_iterin_loop = False
else:
for item in bounded_iterable:
yield item
|
[
"def",
"prog_iter",
"(",
"bounded_iterable",
",",
"delta",
"=",
"0.01",
",",
"line_size",
"=",
"50",
")",
":",
"# TODO: Technically, this should have a __len__.",
"global",
"_prog_iterin_loop",
"if",
"not",
"_prog_iterin_loop",
":",
"startTime",
"=",
"_time",
".",
"time",
"(",
")",
"_prog_iterin_loop",
"=",
"True",
"length",
"=",
"float",
"(",
"len",
"(",
"bounded_iterable",
")",
")",
"_sys",
".",
"stdout",
".",
"write",
"(",
"\".\"",
")",
"dots",
"=",
"1",
"next",
"=",
"delta",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"bounded_iterable",
")",
":",
"if",
"(",
"i",
"+",
"1",
")",
"/",
"length",
">=",
"next",
":",
"next",
"+=",
"delta",
"dots",
"+=",
"1",
"_sys",
".",
"stdout",
".",
"write",
"(",
"\".\"",
")",
"if",
"dots",
"%",
"line_size",
"==",
"0",
":",
"_sys",
".",
"stdout",
".",
"write",
"(",
"\"\\n\"",
")",
"_sys",
".",
"stdout",
".",
"flush",
"(",
")",
"yield",
"item",
"print",
"(",
"(",
"\" (\"",
"+",
"str",
"(",
"_time",
".",
"time",
"(",
")",
"-",
"startTime",
")",
"+",
"\" s)\"",
")",
")",
"_prog_iterin_loop",
"=",
"False",
"else",
":",
"for",
"item",
"in",
"bounded_iterable",
":",
"yield",
"item"
] |
Wraps the provided sequence with an iterator that tracks its progress
on the console.
>>> for i in prog_iter(xrange(100)): pass
..................................................
..................................................
(0.000331163406372 s)
More specifically, the behavior is as follows:
- Produces a progress bar on stdout, at ``delta`` increments, where
``delta`` is a percentage (represented as a float from 0.0 to 1.0)
- Newline every line_size dots (defaults to 50)
- Displays the time the loop took, as in toc() (without interfering with toc)
- A prog_iter nested in another prog_iter will not produce any of these
side effects. That is, only one progress bar will ever be printing at a time.
|
[
"Wraps",
"the",
"provided",
"sequence",
"with",
"an",
"iterator",
"that",
"tracks",
"its",
"progress",
"on",
"the",
"console",
"."
] |
python
|
train
|
hydpy-dev/hydpy
|
hydpy/models/dam/dam_model.py
|
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/dam/dam_model.py#L708-L731
|
def calc_requiredremoterelease_v2(self):
"""Get the required remote release of the last simulation step.
Required log sequence:
|LoggedRequiredRemoteRelease|
Calculated flux sequence:
|RequiredRemoteRelease|
Basic equation:
:math:`RequiredRemoteRelease = LoggedRequiredRemoteRelease`
Example:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> logs.loggedrequiredremoterelease = 3.0
>>> model.calc_requiredremoterelease_v2()
>>> fluxes.requiredremoterelease
requiredremoterelease(3.0)
"""
flu = self.sequences.fluxes.fastaccess
log = self.sequences.logs.fastaccess
flu.requiredremoterelease = log.loggedrequiredremoterelease[0]
|
[
"def",
"calc_requiredremoterelease_v2",
"(",
"self",
")",
":",
"flu",
"=",
"self",
".",
"sequences",
".",
"fluxes",
".",
"fastaccess",
"log",
"=",
"self",
".",
"sequences",
".",
"logs",
".",
"fastaccess",
"flu",
".",
"requiredremoterelease",
"=",
"log",
".",
"loggedrequiredremoterelease",
"[",
"0",
"]"
] |
Get the required remote release of the last simulation step.
Required log sequence:
|LoggedRequiredRemoteRelease|
Calculated flux sequence:
|RequiredRemoteRelease|
Basic equation:
:math:`RequiredRemoteRelease = LoggedRequiredRemoteRelease`
Example:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> logs.loggedrequiredremoterelease = 3.0
>>> model.calc_requiredremoterelease_v2()
>>> fluxes.requiredremoterelease
requiredremoterelease(3.0)
|
[
"Get",
"the",
"required",
"remote",
"release",
"of",
"the",
"last",
"simulation",
"step",
"."
] |
python
|
train
|
nicolargo/glances
|
glances/ports_list.py
|
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/ports_list.py#L53-L135
|
def load(self, config):
"""Load the ports list from the configuration file."""
ports_list = []
if config is None:
logger.debug("No configuration file available. Cannot load ports list.")
elif not config.has_section(self._section):
logger.debug("No [%s] section in the configuration file. Cannot load ports list." % self._section)
else:
logger.debug("Start reading the [%s] section in the configuration file" % self._section)
refresh = int(config.get_value(self._section, 'refresh', default=self._default_refresh))
timeout = int(config.get_value(self._section, 'timeout', default=self._default_timeout))
# Add the default gateway on top of the ports_list
default_gateway = config.get_value(self._section, 'port_default_gateway', default='False')
if default_gateway.lower().startswith('true') and netifaces_tag:
new_port = {}
try:
new_port['host'] = netifaces.gateways()['default'][netifaces.AF_INET][0]
except KeyError:
new_port['host'] = None
# ICMP
new_port['port'] = 0
new_port['description'] = 'DefaultGateway'
new_port['refresh'] = refresh
new_port['timeout'] = timeout
new_port['status'] = None
new_port['rtt_warning'] = None
new_port['indice'] = str('port_0')
logger.debug("Add default gateway %s to the static list" % (new_port['host']))
ports_list.append(new_port)
# Read the scan list
for i in range(1, 256):
new_port = {}
postfix = 'port_%s_' % str(i)
# Read the mandatory configuration key: host
new_port['host'] = config.get_value(self._section, '%s%s' % (postfix, 'host'))
if new_port['host'] is None:
continue
# Read optional configuration keys
# Port is set to 0 by default. 0 means ICMP check instead of TCP check
new_port['port'] = config.get_value(self._section,
'%s%s' % (postfix, 'port'),
0)
new_port['description'] = config.get_value(self._section,
'%sdescription' % postfix,
default="%s:%s" % (new_port['host'], new_port['port']))
# Default status
new_port['status'] = None
# Refresh rate in seconds
new_port['refresh'] = refresh
# Timeout in seconds
new_port['timeout'] = int(config.get_value(self._section,
'%stimeout' % postfix,
default=timeout))
# RTT warning
new_port['rtt_warning'] = config.get_value(self._section,
'%srtt_warning' % postfix,
default=None)
if new_port['rtt_warning'] is not None:
# Convert to seconds
new_port['rtt_warning'] = int(new_port['rtt_warning']) / 1000.0
# Indice
new_port['indice'] = 'port_' + str(i)
# Add the server to the list
logger.debug("Add port %s:%s to the static list" % (new_port['host'], new_port['port']))
ports_list.append(new_port)
# Ports list loaded
logger.debug("Ports list loaded: %s" % ports_list)
return ports_list
|
[
"def",
"load",
"(",
"self",
",",
"config",
")",
":",
"ports_list",
"=",
"[",
"]",
"if",
"config",
"is",
"None",
":",
"logger",
".",
"debug",
"(",
"\"No configuration file available. Cannot load ports list.\"",
")",
"elif",
"not",
"config",
".",
"has_section",
"(",
"self",
".",
"_section",
")",
":",
"logger",
".",
"debug",
"(",
"\"No [%s] section in the configuration file. Cannot load ports list.\"",
"%",
"self",
".",
"_section",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Start reading the [%s] section in the configuration file\"",
"%",
"self",
".",
"_section",
")",
"refresh",
"=",
"int",
"(",
"config",
".",
"get_value",
"(",
"self",
".",
"_section",
",",
"'refresh'",
",",
"default",
"=",
"self",
".",
"_default_refresh",
")",
")",
"timeout",
"=",
"int",
"(",
"config",
".",
"get_value",
"(",
"self",
".",
"_section",
",",
"'timeout'",
",",
"default",
"=",
"self",
".",
"_default_timeout",
")",
")",
"# Add default gateway on top of the ports_list lits",
"default_gateway",
"=",
"config",
".",
"get_value",
"(",
"self",
".",
"_section",
",",
"'port_default_gateway'",
",",
"default",
"=",
"'False'",
")",
"if",
"default_gateway",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'true'",
")",
"and",
"netifaces_tag",
":",
"new_port",
"=",
"{",
"}",
"try",
":",
"new_port",
"[",
"'host'",
"]",
"=",
"netifaces",
".",
"gateways",
"(",
")",
"[",
"'default'",
"]",
"[",
"netifaces",
".",
"AF_INET",
"]",
"[",
"0",
"]",
"except",
"KeyError",
":",
"new_port",
"[",
"'host'",
"]",
"=",
"None",
"# ICMP",
"new_port",
"[",
"'port'",
"]",
"=",
"0",
"new_port",
"[",
"'description'",
"]",
"=",
"'DefaultGateway'",
"new_port",
"[",
"'refresh'",
"]",
"=",
"refresh",
"new_port",
"[",
"'timeout'",
"]",
"=",
"timeout",
"new_port",
"[",
"'status'",
"]",
"=",
"None",
"new_port",
"[",
"'rtt_warning'",
"]",
"=",
"None",
"new_port",
"[",
"'indice'",
"]",
"=",
"str",
"(",
"'port_0'",
")",
"logger",
".",
"debug",
"(",
"\"Add default gateway %s to the static list\"",
"%",
"(",
"new_port",
"[",
"'host'",
"]",
")",
")",
"ports_list",
".",
"append",
"(",
"new_port",
")",
"# Read the scan list",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"256",
")",
":",
"new_port",
"=",
"{",
"}",
"postfix",
"=",
"'port_%s_'",
"%",
"str",
"(",
"i",
")",
"# Read mandatories configuration key: host",
"new_port",
"[",
"'host'",
"]",
"=",
"config",
".",
"get_value",
"(",
"self",
".",
"_section",
",",
"'%s%s'",
"%",
"(",
"postfix",
",",
"'host'",
")",
")",
"if",
"new_port",
"[",
"'host'",
"]",
"is",
"None",
":",
"continue",
"# Read optionals configuration keys",
"# Port is set to 0 by default. 0 mean ICMP check instead of TCP check",
"new_port",
"[",
"'port'",
"]",
"=",
"config",
".",
"get_value",
"(",
"self",
".",
"_section",
",",
"'%s%s'",
"%",
"(",
"postfix",
",",
"'port'",
")",
",",
"0",
")",
"new_port",
"[",
"'description'",
"]",
"=",
"config",
".",
"get_value",
"(",
"self",
".",
"_section",
",",
"'%sdescription'",
"%",
"postfix",
",",
"default",
"=",
"\"%s:%s\"",
"%",
"(",
"new_port",
"[",
"'host'",
"]",
",",
"new_port",
"[",
"'port'",
"]",
")",
")",
"# Default status",
"new_port",
"[",
"'status'",
"]",
"=",
"None",
"# Refresh rate in second",
"new_port",
"[",
"'refresh'",
"]",
"=",
"refresh",
"# Timeout in second",
"new_port",
"[",
"'timeout'",
"]",
"=",
"int",
"(",
"config",
".",
"get_value",
"(",
"self",
".",
"_section",
",",
"'%stimeout'",
"%",
"postfix",
",",
"default",
"=",
"timeout",
")",
")",
"# RTT warning",
"new_port",
"[",
"'rtt_warning'",
"]",
"=",
"config",
".",
"get_value",
"(",
"self",
".",
"_section",
",",
"'%srtt_warning'",
"%",
"postfix",
",",
"default",
"=",
"None",
")",
"if",
"new_port",
"[",
"'rtt_warning'",
"]",
"is",
"not",
"None",
":",
"# Convert to second",
"new_port",
"[",
"'rtt_warning'",
"]",
"=",
"int",
"(",
"new_port",
"[",
"'rtt_warning'",
"]",
")",
"/",
"1000.0",
"# Indice",
"new_port",
"[",
"'indice'",
"]",
"=",
"'port_'",
"+",
"str",
"(",
"i",
")",
"# Add the server to the list",
"logger",
".",
"debug",
"(",
"\"Add port %s:%s to the static list\"",
"%",
"(",
"new_port",
"[",
"'host'",
"]",
",",
"new_port",
"[",
"'port'",
"]",
")",
")",
"ports_list",
".",
"append",
"(",
"new_port",
")",
"# Ports list loaded",
"logger",
".",
"debug",
"(",
"\"Ports list loaded: %s\"",
"%",
"ports_list",
")",
"return",
"ports_list"
] |
Load the ports list from the configuration file.
|
[
"Load",
"the",
"ports",
"list",
"from",
"the",
"configuration",
"file",
"."
] |
python
|
train
|
dmonroy/chilero
|
chilero/web/__init__.py
|
https://github.com/dmonroy/chilero/blob/8f1118a60cb7eab3f9ad31cb8a14b30bc102893d/chilero/web/__init__.py#L25-L41
|
def run(cls, routes, *args, **kwargs): # pragma: no cover
"""
Run a web application.
:param cls: Application class
:param routes: list of routes
:param args: additional arguments
:param kwargs: additional keyword arguments
:return: None
"""
app = init(cls, routes, *args, **kwargs)
HOST = os.getenv('HOST', '0.0.0.0')
PORT = int(os.getenv('PORT', 8000))
aiohttp.web.run_app(app, port=PORT, host=HOST)
|
[
"def",
"run",
"(",
"cls",
",",
"routes",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# pragma: no cover",
"app",
"=",
"init",
"(",
"cls",
",",
"routes",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"HOST",
"=",
"os",
".",
"getenv",
"(",
"'HOST'",
",",
"'0.0.0.0'",
")",
"PORT",
"=",
"int",
"(",
"os",
".",
"getenv",
"(",
"'PORT'",
",",
"8000",
")",
")",
"aiohttp",
".",
"web",
".",
"run_app",
"(",
"app",
",",
"port",
"=",
"PORT",
",",
"host",
"=",
"HOST",
")"
] |
Run a web application.
:param cls: Application class
:param routes: list of routes
:param args: additional arguments
:param kwargs: additional keyword arguments
:return: None
|
[
"Run",
"a",
"web",
"application",
"."
] |
python
|
train
|
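A self-contained equivalent, assuming only aiohttp: build an Application, then hand it to web.run_app with the host and port taken from the environment. The index handler and route are illustrative:

import os
from aiohttp import web

async def index(request):
    return web.Response(text="ok")

app = web.Application()
app.add_routes([web.get("/", index)])

if __name__ == "__main__":
    host = os.getenv("HOST", "0.0.0.0")
    port = int(os.getenv("PORT", 8000))
    web.run_app(app, host=host, port=port)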
mitsei/dlkit
|
dlkit/handcar/repository/sessions.py
|
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/sessions.py#L994-L1014
|
def can_create_asset_content(self, asset_id=None):
"""Tests if this user can create content for ``Assets``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating an ``AssetContent``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer create
operations to an unauthorized user.
:param asset_id: the ``Id`` of an ``Asset``
:type asset_id: ``osid.id.Id``
:return: ``false`` if ``Asset`` content creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``asset_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
url_path = construct_url('authorization',
bank_id=self._catalog_idstr)
return self._get_request(url_path)['assetHints']['canCreate']
|
[
"def",
"can_create_asset_content",
"(",
"self",
",",
"asset_id",
"=",
"None",
")",
":",
"url_path",
"=",
"construct_url",
"(",
"'authorization'",
",",
"bank_id",
"=",
"self",
".",
"_catalog_idstr",
")",
"return",
"self",
".",
"_get_request",
"(",
"url_path",
")",
"[",
"'assetHints'",
"]",
"[",
"'canCreate'",
"]"
] |
Tests if this user can create content for ``Assets``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating an ``AssetContent``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer create
operations to an unauthorized user.
:param asset_id: the ``Id`` of an ``Asset``
:type asset_id: ``osid.id.Id``
:return: ``false`` if ``Asset`` content creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``asset_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
|
[
"Tests",
"if",
"this",
"user",
"can",
"create",
"content",
"for",
"Assets",
"."
] |
python
|
train
|
jalmeroth/pymusiccast
|
pymusiccast/__init__.py
|
https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/pymusiccast/__init__.py#L272-L276
|
def set_playback(self, playback):
"""Send Playback command."""
req_url = ENDPOINTS["setPlayback"].format(self._ip_address)
params = {"playback": playback}
return request(req_url, params=params)
|
[
"def",
"set_playback",
"(",
"self",
",",
"playback",
")",
":",
"req_url",
"=",
"ENDPOINTS",
"[",
"\"setPlayback\"",
"]",
".",
"format",
"(",
"self",
".",
"_ip_address",
")",
"params",
"=",
"{",
"\"playback\"",
":",
"playback",
"}",
"return",
"request",
"(",
"req_url",
",",
"params",
"=",
"params",
")"
] |
Send Playback command.
|
[
"Send",
"Playback",
"command",
"."
] |
python
|
train
|
bpsmith/tia
|
tia/rlab/table.py
|
https://github.com/bpsmith/tia/blob/a7043b6383e557aeea8fc7112bbffd6e36a230e9/tia/rlab/table.py#L450-L462
|
def apply_format(self, fmtfct):
"""
For each cell in the region, invoke fmtfct(cell_value) and store result in the formatted_values
:param fmtfct: function(cell_value) which should return a formatted value for display
:return: self
"""
for ridx in range(self.nrows):
for cidx in range(self.ncols):
# MUST set the parent as local view is immutable
riloc = self.row_ilocs[ridx]
ciloc = self.col_ilocs[cidx]
self.parent.formatted_values.iloc[riloc, ciloc] = fmtfct(self.actual_values.iloc[ridx, cidx])
return self
|
[
"def",
"apply_format",
"(",
"self",
",",
"fmtfct",
")",
":",
"for",
"ridx",
"in",
"range",
"(",
"self",
".",
"nrows",
")",
":",
"for",
"cidx",
"in",
"range",
"(",
"self",
".",
"ncols",
")",
":",
"# MUST set the parent as local view is immutable",
"riloc",
"=",
"self",
".",
"row_ilocs",
"[",
"ridx",
"]",
"ciloc",
"=",
"self",
".",
"col_ilocs",
"[",
"cidx",
"]",
"self",
".",
"parent",
".",
"formatted_values",
".",
"iloc",
"[",
"riloc",
",",
"ciloc",
"]",
"=",
"fmtfct",
"(",
"self",
".",
"actual_values",
".",
"iloc",
"[",
"ridx",
",",
"cidx",
"]",
")",
"return",
"self"
] |
For each cell in the region, invoke fmtfct(cell_value) and store result in the formatted_values
:param fmtfct: function(cell_value) which should return a formatted value for display
:return: self
|
[
"For",
"each",
"cell",
"in",
"the",
"region",
"invoke",
"fmtfct",
"(",
"cell_value",
")",
"and",
"store",
"result",
"in",
"the",
"formatted_values",
":",
"param",
"fmtfct",
":",
"function",
"(",
"cell_value",
")",
"which",
"should",
"return",
"a",
"formatted",
"value",
"for",
"display",
":",
"return",
":",
"self"
] |
python
|
train
|
bunq/sdk_python
|
bunq/sdk/json/converter.py
|
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/json/converter.py#L195-L217
|
def _deserialize_dict_attributes(cls, cls_context, dict_):
"""
:type cls_context: type
:type dict_: dict
:rtype: dict
"""
dict_deserialized = {}
for key in dict_.keys():
key_deserialized = cls._deserialize_key(key)
value_specs = cls._get_value_specs(cls_context, key_deserialized)
if value_specs is not None:
dict_deserialized[value_specs.name] = cls._deserialize_value(
value_specs.types,
dict_[key]
)
else:
cls._warn_key_unknown(cls_context, key)
return dict_deserialized
|
[
"def",
"_deserialize_dict_attributes",
"(",
"cls",
",",
"cls_context",
",",
"dict_",
")",
":",
"dict_deserialized",
"=",
"{",
"}",
"for",
"key",
"in",
"dict_",
".",
"keys",
"(",
")",
":",
"key_deserialized",
"=",
"cls",
".",
"_deserialize_key",
"(",
"key",
")",
"value_specs",
"=",
"cls",
".",
"_get_value_specs",
"(",
"cls_context",
",",
"key_deserialized",
")",
"if",
"value_specs",
"is",
"not",
"None",
":",
"dict_deserialized",
"[",
"value_specs",
".",
"name",
"]",
"=",
"cls",
".",
"_deserialize_value",
"(",
"value_specs",
".",
"types",
",",
"dict_",
"[",
"key",
"]",
")",
"else",
":",
"cls",
".",
"_warn_key_unknown",
"(",
"cls_context",
",",
"key",
")",
"return",
"dict_deserialized"
] |
:type cls_context: type
:type dict_: dict
:rtype: dict
|
[
":",
"type",
"cls_context",
":",
"type",
":",
"type",
"dict_",
":",
"dict"
] |
python
|
train
|
zhanglab/psamm
|
psamm/gapfill.py
|
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/gapfill.py#L36-L49
|
def _find_integer_tolerance(epsilon, v_max, min_tol):
"""Find appropriate integer tolerance for gap-filling problems."""
int_tol = min(epsilon / (10 * v_max), 0.1)
min_tol = max(1e-10, min_tol)
if int_tol < min_tol:
eps_lower = min_tol * 10 * v_max
logger.warning(
'When the maximum flux is {}, it is recommended that'
' epsilon > {} to avoid numerical issues with this'
' solver. Results may be incorrect with'
' the current settings!'.format(v_max, eps_lower))
return min_tol
return int_tol
|
[
"def",
"_find_integer_tolerance",
"(",
"epsilon",
",",
"v_max",
",",
"min_tol",
")",
":",
"int_tol",
"=",
"min",
"(",
"epsilon",
"/",
"(",
"10",
"*",
"v_max",
")",
",",
"0.1",
")",
"min_tol",
"=",
"max",
"(",
"1e-10",
",",
"min_tol",
")",
"if",
"int_tol",
"<",
"min_tol",
":",
"eps_lower",
"=",
"min_tol",
"*",
"10",
"*",
"v_max",
"logger",
".",
"warning",
"(",
"'When the maximum flux is {}, it is recommended that'",
"' epsilon > {} to avoid numerical issues with this'",
"' solver. Results may be incorrect with'",
"' the current settings!'",
".",
"format",
"(",
"v_max",
",",
"eps_lower",
")",
")",
"return",
"min_tol",
"return",
"int_tol"
] |
Find appropriate integer tolerance for gap-filling problems.
|
[
"Find",
"appropriate",
"integer",
"tolerance",
"for",
"gap",
"-",
"filling",
"problems",
"."
] |
python
|
train
|
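Worked numbers make the warning branch concrete: with epsilon = 1e-6, v_max = 1000 and min_tol = 1e-8, the candidate tolerance 1e-10 falls below min_tol, so the function warns that epsilon should exceed min_tol * 10 * v_max = 1e-4 and returns min_tol:

epsilon, v_max, min_tol = 1e-6, 1000.0, 1e-8
int_tol = min(epsilon / (10 * v_max), 0.1)  # 1e-10
print(int_tol < min_tol)                    # True -> warn and fall back to min_tol
print(min_tol * 10 * v_max)                 # 0.0001, the recommended epsilon floor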
PyCQA/pylint
|
pylint/checkers/utils.py
|
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/utils.py#L1128-L1158
|
def is_registered_in_singledispatch_function(node: astroid.FunctionDef) -> bool:
"""Check if the given function node is a singledispatch function."""
singledispatch_qnames = (
"functools.singledispatch",
"singledispatch.singledispatch",
)
if not isinstance(node, astroid.FunctionDef):
return False
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
# func.register are function calls
if not isinstance(decorator, astroid.Call):
continue
func = decorator.func
if not isinstance(func, astroid.Attribute) or func.attrname != "register":
continue
try:
func_def = next(func.expr.infer())
except astroid.InferenceError:
continue
if isinstance(func_def, astroid.FunctionDef):
# pylint: disable=redundant-keyword-arg; some flow inference goes wrong here
return decorated_with(func_def, singledispatch_qnames)
return False
|
[
"def",
"is_registered_in_singledispatch_function",
"(",
"node",
":",
"astroid",
".",
"FunctionDef",
")",
"->",
"bool",
":",
"singledispatch_qnames",
"=",
"(",
"\"functools.singledispatch\"",
",",
"\"singledispatch.singledispatch\"",
",",
")",
"if",
"not",
"isinstance",
"(",
"node",
",",
"astroid",
".",
"FunctionDef",
")",
":",
"return",
"False",
"decorators",
"=",
"node",
".",
"decorators",
".",
"nodes",
"if",
"node",
".",
"decorators",
"else",
"[",
"]",
"for",
"decorator",
"in",
"decorators",
":",
"# func.register are function calls",
"if",
"not",
"isinstance",
"(",
"decorator",
",",
"astroid",
".",
"Call",
")",
":",
"continue",
"func",
"=",
"decorator",
".",
"func",
"if",
"not",
"isinstance",
"(",
"func",
",",
"astroid",
".",
"Attribute",
")",
"or",
"func",
".",
"attrname",
"!=",
"\"register\"",
":",
"continue",
"try",
":",
"func_def",
"=",
"next",
"(",
"func",
".",
"expr",
".",
"infer",
"(",
")",
")",
"except",
"astroid",
".",
"InferenceError",
":",
"continue",
"if",
"isinstance",
"(",
"func_def",
",",
"astroid",
".",
"FunctionDef",
")",
":",
"# pylint: disable=redundant-keyword-arg; some flow inference goes wrong here",
"return",
"decorated_with",
"(",
"func_def",
",",
"singledispatch_qnames",
")",
"return",
"False"
] |
Check if the given function node is a singledispatch function.
|
[
"Check",
"if",
"the",
"given",
"function",
"node",
"is",
"a",
"singledispatch",
"function",
"."
] |
python
|
test
|
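For context, this is the standard-library registration pattern the checker matches: `func.register(...)` used as a decorator, i.e. an `astroid.Call` whose `func` is an `Attribute` named `register` (a minimal, self-contained illustration):

from functools import singledispatch

@singledispatch
def process(arg):
    return "default"

@process.register(int)   # the decorator is a Call whose func.attrname == "register"
def _process_int(arg):
    return "int"

print(process(1), process("x"))   # int default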
jazzband/django-ddp
|
dddp/websocket.py
|
https://github.com/jazzband/django-ddp/blob/1e1954b06fe140346acea43582515991685e4e01/dddp/websocket.py#L233-L288
|
def process_ddp(self, data):
"""Process a single DDP message."""
msg_id = data.get('id', None)
try:
msg = data.pop('msg')
except KeyError:
self.reply(
'error', reason='Bad request',
offendingMessage=data,
)
return
try:
# dispatch message
self.dispatch(msg, data)
except Exception as err: # pylint: disable=broad-except
# This should be the only protocol exception handler
kwargs = {
'msg': {'method': 'result'}.get(msg, 'error'),
}
if msg_id is not None:
kwargs['id'] = msg_id
if isinstance(err, MeteorError):
error = err.as_dict()
else:
error = {
'error': 500,
'reason': 'Internal server error',
}
if kwargs['msg'] == 'error':
kwargs.update(error)
else:
kwargs['error'] = error
if not isinstance(err, MeteorError):
# not a client error, should always be logged.
stack, _ = safe_call(
self.logger.error, '%r %r', msg, data, exc_info=1,
)
if stack is not None:
# something went wrong while logging the error, revert to
# writing a stack trace to stderr.
traceback.print_exc(file=sys.stderr)
sys.stderr.write(
'Additionally, while handling the above error the '
'following error was encountered:\n'
)
sys.stderr.write(stack)
elif settings.DEBUG:
print('ERROR: %s' % err)
dprint('msg', msg)
dprint('data', data)
error.setdefault('details', traceback.format_exc())
# print stack trace for client errors when DEBUG is True.
print(error['details'])
self.reply(**kwargs)
if msg_id and msg == 'method':
self.reply('updated', methods=[msg_id])
|
[
"def",
"process_ddp",
"(",
"self",
",",
"data",
")",
":",
"msg_id",
"=",
"data",
".",
"get",
"(",
"'id'",
",",
"None",
")",
"try",
":",
"msg",
"=",
"data",
".",
"pop",
"(",
"'msg'",
")",
"except",
"KeyError",
":",
"self",
".",
"reply",
"(",
"'error'",
",",
"reason",
"=",
"'Bad request'",
",",
"offendingMessage",
"=",
"data",
",",
")",
"return",
"try",
":",
"# dispatch message",
"self",
".",
"dispatch",
"(",
"msg",
",",
"data",
")",
"except",
"Exception",
"as",
"err",
":",
"# pylint: disable=broad-except",
"# This should be the only protocol exception handler",
"kwargs",
"=",
"{",
"'msg'",
":",
"{",
"'method'",
":",
"'result'",
"}",
".",
"get",
"(",
"msg",
",",
"'error'",
")",
",",
"}",
"if",
"msg_id",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'id'",
"]",
"=",
"msg_id",
"if",
"isinstance",
"(",
"err",
",",
"MeteorError",
")",
":",
"error",
"=",
"err",
".",
"as_dict",
"(",
")",
"else",
":",
"error",
"=",
"{",
"'error'",
":",
"500",
",",
"'reason'",
":",
"'Internal server error'",
",",
"}",
"if",
"kwargs",
"[",
"'msg'",
"]",
"==",
"'error'",
":",
"kwargs",
".",
"update",
"(",
"error",
")",
"else",
":",
"kwargs",
"[",
"'error'",
"]",
"=",
"error",
"if",
"not",
"isinstance",
"(",
"err",
",",
"MeteorError",
")",
":",
"# not a client error, should always be logged.",
"stack",
",",
"_",
"=",
"safe_call",
"(",
"self",
".",
"logger",
".",
"error",
",",
"'%r %r'",
",",
"msg",
",",
"data",
",",
"exc_info",
"=",
"1",
",",
")",
"if",
"stack",
"is",
"not",
"None",
":",
"# something went wrong while logging the error, revert to",
"# writing a stack trace to stderr.",
"traceback",
".",
"print_exc",
"(",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"'Additionally, while handling the above error the '",
"'following error was encountered:\\n'",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"stack",
")",
"elif",
"settings",
".",
"DEBUG",
":",
"print",
"(",
"'ERROR: %s'",
"%",
"err",
")",
"dprint",
"(",
"'msg'",
",",
"msg",
")",
"dprint",
"(",
"'data'",
",",
"data",
")",
"error",
".",
"setdefault",
"(",
"'details'",
",",
"traceback",
".",
"format_exc",
"(",
")",
")",
"# print stack trace for client errors when DEBUG is True.",
"print",
"(",
"error",
"[",
"'details'",
"]",
")",
"self",
".",
"reply",
"(",
"*",
"*",
"kwargs",
")",
"if",
"msg_id",
"and",
"msg",
"==",
"'method'",
":",
"self",
".",
"reply",
"(",
"'updated'",
",",
"methods",
"=",
"[",
"msg_id",
"]",
")"
] |
Process a single DDP message.
|
[
"Process",
"a",
"single",
"DDP",
"message",
"."
] |
python
|
test
|
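The reply-type selection above uses a one-entry dict as a lookup table; a minimal illustration of the idiom:

for msg in ('method', 'sub', 'unsub'):
    reply_type = {'method': 'result'}.get(msg, 'error')
    print(msg, '->', reply_type)   # only 'method' maps to 'result'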
TomasTomecek/sen
|
sen/tui/widgets/list/common.py
|
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/widgets/list/common.py#L23-L42
|
def strip_from_ansi_esc_sequences(text):
"""
find ANSI escape sequences in text and remove them
:param text: str
    :return: str, the text with ANSI escape sequences removed
"""
# esc[ + values + control character
# h, l, p commands are complicated, let's ignore them
seq_regex = r"\x1b\[[0-9;]*[mKJusDCBAfH]"
regex = re.compile(seq_regex)
start = 0
response = ""
for match in regex.finditer(text):
end = match.start()
response += text[start:end]
start = match.end()
response += text[start:len(text)]
return response
|
[
"def",
"strip_from_ansi_esc_sequences",
"(",
"text",
")",
":",
"# esc[ + values + control character",
"# h, l, p commands are complicated, let's ignore them",
"seq_regex",
"=",
"r\"\\x1b\\[[0-9;]*[mKJusDCBAfH]\"",
"regex",
"=",
"re",
".",
"compile",
"(",
"seq_regex",
")",
"start",
"=",
"0",
"response",
"=",
"\"\"",
"for",
"match",
"in",
"regex",
".",
"finditer",
"(",
"text",
")",
":",
"end",
"=",
"match",
".",
"start",
"(",
")",
"response",
"+=",
"text",
"[",
"start",
":",
"end",
"]",
"start",
"=",
"match",
".",
"end",
"(",
")",
"response",
"+=",
"text",
"[",
"start",
":",
"len",
"(",
"text",
")",
"]",
"return",
"response"
] |
find ANSI escape sequences in text and remove them
:param text: str
:return: str, the text with ANSI escape sequences removed
|
[
"find",
"ANSI",
"escape",
"sequences",
"in",
"text",
"and",
"remove",
"them"
] |
python
|
train
|
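The same result can be checked against `re.sub`, which replaces the manual slice-and-append loop (a sketch reusing the regex from the function above):

import re

seq_regex = r"\x1b\[[0-9;]*[mKJusDCBAfH]"
colored = "\x1b[31mred\x1b[0m and plain"
print(re.sub(seq_regex, "", colored))   # "red and plain"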
thebigmunch/gmusicapi-wrapper
|
gmusicapi_wrapper/base.py
|
https://github.com/thebigmunch/gmusicapi-wrapper/blob/8708683cd33955def1378fc28319ef37805b851d/gmusicapi_wrapper/base.py#L129-L193
|
def get_local_playlist_songs(
playlist, include_filters=None, exclude_filters=None,
all_includes=False, all_excludes=False, exclude_patterns=None):
"""Load songs from local playlist.
Parameters:
playlist (str): An M3U(8) playlist filepath.
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid mutagen metadata fields. Patterns are Python regex patterns.
Local songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid mutagen metadata fields. Patterns are Python regex patterns.
Local songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
exclude_patterns (list or str): Pattern(s) to exclude.
Patterns are Python regex patterns.
Filepaths are excluded if they match any of the exclude patterns.
Returns:
A list of local playlist song filepaths matching criteria,
a list of local playlist song filepaths filtered out using filter criteria,
and a list of local playlist song filepaths excluded using exclusion criteria.
"""
logger.info("Loading local playlist songs...")
if os.name == 'nt' and CYGPATH_RE.match(playlist):
playlist = convert_cygwin_path(playlist)
filepaths = []
base_filepath = os.path.dirname(os.path.abspath(playlist))
with open(playlist) as local_playlist:
for line in local_playlist.readlines():
line = line.strip()
if line.lower().endswith(SUPPORTED_SONG_FORMATS):
path = line
if not os.path.isabs(path):
path = os.path.join(base_filepath, path)
if os.path.isfile(path):
filepaths.append(path)
supported_filepaths = get_supported_filepaths(filepaths, SUPPORTED_SONG_FORMATS)
included_songs, excluded_songs = exclude_filepaths(supported_filepaths, exclude_patterns=exclude_patterns)
matched_songs, filtered_songs = filter_local_songs(
included_songs, include_filters=include_filters, exclude_filters=exclude_filters,
all_includes=all_includes, all_excludes=all_excludes
)
logger.info("Excluded {0} local playlist songs".format(len(excluded_songs)))
logger.info("Filtered {0} local playlist songs".format(len(filtered_songs)))
logger.info("Loaded {0} local playlist songs".format(len(matched_songs)))
return matched_songs, filtered_songs, excluded_songs
|
[
"def",
"get_local_playlist_songs",
"(",
"playlist",
",",
"include_filters",
"=",
"None",
",",
"exclude_filters",
"=",
"None",
",",
"all_includes",
"=",
"False",
",",
"all_excludes",
"=",
"False",
",",
"exclude_patterns",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"\"Loading local playlist songs...\"",
")",
"if",
"os",
".",
"name",
"==",
"'nt'",
"and",
"CYGPATH_RE",
".",
"match",
"(",
"playlist",
")",
":",
"playlist",
"=",
"convert_cygwin_path",
"(",
"playlist",
")",
"filepaths",
"=",
"[",
"]",
"base_filepath",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"playlist",
")",
")",
"with",
"open",
"(",
"playlist",
")",
"as",
"local_playlist",
":",
"for",
"line",
"in",
"local_playlist",
".",
"readlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"SUPPORTED_SONG_FORMATS",
")",
":",
"path",
"=",
"line",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_filepath",
",",
"path",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"filepaths",
".",
"append",
"(",
"path",
")",
"supported_filepaths",
"=",
"get_supported_filepaths",
"(",
"filepaths",
",",
"SUPPORTED_SONG_FORMATS",
")",
"included_songs",
",",
"excluded_songs",
"=",
"exclude_filepaths",
"(",
"supported_filepaths",
",",
"exclude_patterns",
"=",
"exclude_patterns",
")",
"matched_songs",
",",
"filtered_songs",
"=",
"filter_local_songs",
"(",
"included_songs",
",",
"include_filters",
"=",
"include_filters",
",",
"exclude_filters",
"=",
"exclude_filters",
",",
"all_includes",
"=",
"all_includes",
",",
"all_excludes",
"=",
"all_excludes",
")",
"logger",
".",
"info",
"(",
"\"Excluded {0} local playlist songs\"",
".",
"format",
"(",
"len",
"(",
"excluded_songs",
")",
")",
")",
"logger",
".",
"info",
"(",
"\"Filtered {0} local playlist songs\"",
".",
"format",
"(",
"len",
"(",
"filtered_songs",
")",
")",
")",
"logger",
".",
"info",
"(",
"\"Loaded {0} local playlist songs\"",
".",
"format",
"(",
"len",
"(",
"matched_songs",
")",
")",
")",
"return",
"matched_songs",
",",
"filtered_songs",
",",
"excluded_songs"
] |
Load songs from local playlist.
Parameters:
playlist (str): An M3U(8) playlist filepath.
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid mutagen metadata fields. Patterns are Python regex patterns.
Local songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid mutagen metadata fields. Patterns are Python regex patterns.
Local songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
exclude_patterns (list or str): Pattern(s) to exclude.
Patterns are Python regex patterns.
Filepaths are excluded if they match any of the exclude patterns.
Returns:
A list of local playlist song filepaths matching criteria,
a list of local playlist song filepaths filtered out using filter criteria,
and a list of local playlist song filepaths excluded using exclusion criteria.
|
[
"Load",
"songs",
"from",
"local",
"playlist",
"."
] |
python
|
valid
|
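A small self-contained sketch of the path-resolution step above: relative playlist entries are joined onto the playlist's own directory (the `.mp3`/`.flac` suffixes stand in for SUPPORTED_SONG_FORMATS):

import os

base_filepath = "/music/playlists"          # directory of the .m3u file
lines = ["# comment", "album/track01.mp3", "/music/other/track02.flac"]
for line in lines:
    line = line.strip()
    if line.lower().endswith((".mp3", ".flac")):
        path = line if os.path.isabs(line) else os.path.join(base_filepath, line)
        print(path)
# /music/playlists/album/track01.mp3
# /music/other/track02.flac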
lmjohns3/theanets
|
theanets/graph.py
|
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/graph.py#L240-L365
|
def itertrain(self, train, valid=None, algo='rmsprop', subalgo='rmsprop',
save_every=0, save_progress=None, **kwargs):
'''Train our network, one batch at a time.
This method yields a series of ``(train, valid)`` monitor pairs. The
``train`` value is a dictionary mapping names to monitor values
evaluated on the training dataset. The ``valid`` value is also a
dictionary mapping names to values, but these values are evaluated on
the validation dataset.
Because validation might not occur every training iteration, the
validation monitors might be repeated for multiple training iterations.
It is probably most helpful to think of the validation monitors as being
the "most recent" values that have been computed.
After training completes, the network attribute of this class will
contain the trained network parameters.
Parameters
----------
train : :class:`Dataset <downhill.dataset.Dataset>` or list
A dataset to use when training the network. If this is a
``downhill.Dataset`` instance, it will be used directly as the
        training dataset. If it is a list of numpy arrays or a list of
callables, it will be converted to a ``downhill.Dataset`` and then
used as the training set.
valid : :class:`Dataset <downhill.dataset.Dataset>` or list, optional
If this is provided, it will be used as a validation dataset. If not
provided, the training set will be used for validation. (This is not
recommended!)
algo : str, optional
An optimization algorithm to use for training our network. If not
provided, :class:`RMSProp <downhill.adaptive.RMSProp>` will be used.
subalgo : str, optional
An optimization algorithm to use for a trainer that requires a
"sub-algorithm," sugh as an unsupervised pretrainer. Defaults to
:class:`RMSProp <downhill.adaptive.RMSProp>`.
save_every : int or float, optional
If this is nonzero and ``save_progress`` is not None, then the model
being trained will be saved periodically. If this is a float, it is
treated as a number of minutes to wait between savings. If it is an
int, it is treated as the number of training epochs to wait between
savings. Defaults to 0.
save_progress : str or file handle, optional
        If this is not None, and ``save_every`` is nonzero, then save the
model periodically during training. This parameter gives either (a)
the full path of a file to save the model, or (b) a file-like object
where the model should be saved. If it is a string and the given
name contains a "{}" format specifier, it will be filled with the
integer Unix timestamp at the time the model is saved. Defaults to
None, which does not save models.
Yields
------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
'''
if 'rng' not in kwargs:
kwargs['rng'] = self._rng
def create_dataset(data, **kwargs):
name = kwargs.get('name', 'dataset')
s = '{}_batches'.format(name)
return downhill.Dataset(
data,
name=name,
batch_size=kwargs.get('batch_size', 32),
iteration_size=kwargs.get('iteration_size', kwargs.get(s)),
axis=kwargs.get('axis', 0),
rng=kwargs['rng'])
# set up datasets ...
if valid is None:
valid = train
if not isinstance(valid, downhill.Dataset):
valid = create_dataset(valid, name='valid', **kwargs)
if not isinstance(train, downhill.Dataset):
train = create_dataset(train, name='train', **kwargs)
if 'algorithm' in kwargs:
warnings.warn(
'please use the "algo" keyword arg instead of "algorithm"',
DeprecationWarning)
algo = kwargs.pop('algorithm')
if isinstance(algo, (list, tuple)):
algo = algo[0]
# set up trainer ...
if isinstance(algo, util.basestring):
algo = algo.lower()
if algo == 'sample':
algo = trainer.SampleTrainer(self)
elif algo.startswith('layer') or algo.startswith('sup'):
algo = trainer.SupervisedPretrainer(subalgo, self)
elif algo.startswith('pre') or algo.startswith('unsup'):
algo = trainer.UnsupervisedPretrainer(subalgo, self)
else:
algo = trainer.DownhillTrainer(algo, self)
# set up check to save model ...
def needs_saving(elapsed, iteration):
if save_progress is None:
return False
if isinstance(save_every, float):
return elapsed > 60 * save_every
if isinstance(save_every, int):
return iteration % save_every == 0
return False
# train it!
start = time.time()
for i, monitors in enumerate(algo.itertrain(train, valid, **kwargs)):
yield monitors
now = time.time()
if i and needs_saving(now - start, i):
filename_or_handle = save_progress
if isinstance(filename_or_handle, util.basestring):
filename_or_handle = save_progress.format(int(now))
self.save(filename_or_handle)
start = now
|
[
"def",
"itertrain",
"(",
"self",
",",
"train",
",",
"valid",
"=",
"None",
",",
"algo",
"=",
"'rmsprop'",
",",
"subalgo",
"=",
"'rmsprop'",
",",
"save_every",
"=",
"0",
",",
"save_progress",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'rng'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'rng'",
"]",
"=",
"self",
".",
"_rng",
"def",
"create_dataset",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"name",
"=",
"kwargs",
".",
"get",
"(",
"'name'",
",",
"'dataset'",
")",
"s",
"=",
"'{}_batches'",
".",
"format",
"(",
"name",
")",
"return",
"downhill",
".",
"Dataset",
"(",
"data",
",",
"name",
"=",
"name",
",",
"batch_size",
"=",
"kwargs",
".",
"get",
"(",
"'batch_size'",
",",
"32",
")",
",",
"iteration_size",
"=",
"kwargs",
".",
"get",
"(",
"'iteration_size'",
",",
"kwargs",
".",
"get",
"(",
"s",
")",
")",
",",
"axis",
"=",
"kwargs",
".",
"get",
"(",
"'axis'",
",",
"0",
")",
",",
"rng",
"=",
"kwargs",
"[",
"'rng'",
"]",
")",
"# set up datasets ...",
"if",
"valid",
"is",
"None",
":",
"valid",
"=",
"train",
"if",
"not",
"isinstance",
"(",
"valid",
",",
"downhill",
".",
"Dataset",
")",
":",
"valid",
"=",
"create_dataset",
"(",
"valid",
",",
"name",
"=",
"'valid'",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"isinstance",
"(",
"train",
",",
"downhill",
".",
"Dataset",
")",
":",
"train",
"=",
"create_dataset",
"(",
"train",
",",
"name",
"=",
"'train'",
",",
"*",
"*",
"kwargs",
")",
"if",
"'algorithm'",
"in",
"kwargs",
":",
"warnings",
".",
"warn",
"(",
"'please use the \"algo\" keyword arg instead of \"algorithm\"'",
",",
"DeprecationWarning",
")",
"algo",
"=",
"kwargs",
".",
"pop",
"(",
"'algorithm'",
")",
"if",
"isinstance",
"(",
"algo",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"algo",
"=",
"algo",
"[",
"0",
"]",
"# set up trainer ...",
"if",
"isinstance",
"(",
"algo",
",",
"util",
".",
"basestring",
")",
":",
"algo",
"=",
"algo",
".",
"lower",
"(",
")",
"if",
"algo",
"==",
"'sample'",
":",
"algo",
"=",
"trainer",
".",
"SampleTrainer",
"(",
"self",
")",
"elif",
"algo",
".",
"startswith",
"(",
"'layer'",
")",
"or",
"algo",
".",
"startswith",
"(",
"'sup'",
")",
":",
"algo",
"=",
"trainer",
".",
"SupervisedPretrainer",
"(",
"subalgo",
",",
"self",
")",
"elif",
"algo",
".",
"startswith",
"(",
"'pre'",
")",
"or",
"algo",
".",
"startswith",
"(",
"'unsup'",
")",
":",
"algo",
"=",
"trainer",
".",
"UnsupervisedPretrainer",
"(",
"subalgo",
",",
"self",
")",
"else",
":",
"algo",
"=",
"trainer",
".",
"DownhillTrainer",
"(",
"algo",
",",
"self",
")",
"# set up check to save model ...",
"def",
"needs_saving",
"(",
"elapsed",
",",
"iteration",
")",
":",
"if",
"save_progress",
"is",
"None",
":",
"return",
"False",
"if",
"isinstance",
"(",
"save_every",
",",
"float",
")",
":",
"return",
"elapsed",
">",
"60",
"*",
"save_every",
"if",
"isinstance",
"(",
"save_every",
",",
"int",
")",
":",
"return",
"iteration",
"%",
"save_every",
"==",
"0",
"return",
"False",
"# train it!",
"start",
"=",
"time",
".",
"time",
"(",
")",
"for",
"i",
",",
"monitors",
"in",
"enumerate",
"(",
"algo",
".",
"itertrain",
"(",
"train",
",",
"valid",
",",
"*",
"*",
"kwargs",
")",
")",
":",
"yield",
"monitors",
"now",
"=",
"time",
".",
"time",
"(",
")",
"if",
"i",
"and",
"needs_saving",
"(",
"now",
"-",
"start",
",",
"i",
")",
":",
"filename_or_handle",
"=",
"save_progress",
"if",
"isinstance",
"(",
"filename_or_handle",
",",
"util",
".",
"basestring",
")",
":",
"filename_or_handle",
"=",
"save_progress",
".",
"format",
"(",
"int",
"(",
"now",
")",
")",
"self",
".",
"save",
"(",
"filename_or_handle",
")",
"start",
"=",
"now"
] |
Train our network, one batch at a time.
This method yields a series of ``(train, valid)`` monitor pairs. The
``train`` value is a dictionary mapping names to monitor values
evaluated on the training dataset. The ``valid`` value is also a
dictionary mapping names to values, but these values are evaluated on
the validation dataset.
Because validation might not occur every training iteration, the
validation monitors might be repeated for multiple training iterations.
It is probably most helpful to think of the validation monitors as being
the "most recent" values that have been computed.
After training completes, the network attribute of this class will
contain the trained network parameters.
Parameters
----------
train : :class:`Dataset <downhill.dataset.Dataset>` or list
A dataset to use when training the network. If this is a
``downhill.Dataset`` instance, it will be used directly as the
training dataset. If it is a list of numpy arrays or a list of
callables, it will be converted to a ``downhill.Dataset`` and then
used as the training set.
valid : :class:`Dataset <downhill.dataset.Dataset>` or list, optional
If this is provided, it will be used as a validation dataset. If not
provided, the training set will be used for validation. (This is not
recommended!)
algo : str, optional
An optimization algorithm to use for training our network. If not
provided, :class:`RMSProp <downhill.adaptive.RMSProp>` will be used.
subalgo : str, optional
An optimization algorithm to use for a trainer that requires a
"sub-algorithm," sugh as an unsupervised pretrainer. Defaults to
:class:`RMSProp <downhill.adaptive.RMSProp>`.
save_every : int or float, optional
If this is nonzero and ``save_progress`` is not None, then the model
being trained will be saved periodically. If this is a float, it is
treated as a number of minutes to wait between savings. If it is an
int, it is treated as the number of training epochs to wait between
savings. Defaults to 0.
save_progress : str or file handle, optional
If this is not None, and ``save_every`` is nonzero, then save the
model periodically during training. This parameter gives either (a)
the full path of a file to save the model, or (b) a file-like object
where the model should be saved. If it is a string and the given
name contains a "{}" format specifier, it will be filled with the
integer Unix timestamp at the time the model is saved. Defaults to
None, which does not save models.
Yields
------
training : dict
A dictionary of monitor values computed using the training dataset,
at the conclusion of training. This dictionary will at least contain
a 'loss' key that indicates the value of the loss function. Other
keys may be available depending on the trainer being used.
validation : dict
A dictionary of monitor values computed using the validation
dataset, at the conclusion of training.
|
[
"Train",
"our",
"network",
"one",
"batch",
"at",
"a",
"time",
"."
] |
python
|
test
|
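The save cadence above is type-dispatched: a float `save_every` means wall-clock minutes, an int means epochs. A standalone sketch of that check:

def needs_saving(elapsed, iteration, save_every, save_progress):
    # Float -> minutes of elapsed time; int -> every N iterations.
    if save_progress is None:
        return False
    if isinstance(save_every, float):
        return elapsed > 60 * save_every
    if isinstance(save_every, int):
        return iteration % save_every == 0
    return False

print(needs_saving(120.0, 7, 1.5, 'model.pkl'))   # True: 120 s > 90 s
print(needs_saving(30.0, 10, 5, 'model.pkl'))     # True: epoch 10 % 5 == 0
print(needs_saving(30.0, 10, 5, None))            # False: nowhere to save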
wmayner/pyphi
|
pyphi/subsystem.py
|
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/subsystem.py#L614-L621
|
def phi_cause_mip(self, mechanism, purview):
"""Return the |small_phi| of the cause MIP.
This is the distance between the unpartitioned cause repertoire and the
MIP cause repertoire.
"""
mip = self.cause_mip(mechanism, purview)
return mip.phi if mip else 0
|
[
"def",
"phi_cause_mip",
"(",
"self",
",",
"mechanism",
",",
"purview",
")",
":",
"mip",
"=",
"self",
".",
"cause_mip",
"(",
"mechanism",
",",
"purview",
")",
"return",
"mip",
".",
"phi",
"if",
"mip",
"else",
"0"
] |
Return the |small_phi| of the cause MIP.
This is the distance between the unpartitioned cause repertoire and the
MIP cause repertoire.
|
[
"Return",
"the",
"|small_phi|",
"of",
"the",
"cause",
"MIP",
"."
] |
python
|
train
|
markfinger/python-js-host
|
js_host/manager.py
|
https://github.com/markfinger/python-js-host/blob/7727138c1eae779335d55fb4d7734698225a6322/js_host/manager.py#L69-L83
|
def stop_host(self, config_file):
"""
Stops a managed host specified by `config_file`.
"""
res = self.send_json_request('host/stop', data={'config': config_file})
if res.status_code != 200:
raise UnexpectedResponse(
'Attempted to stop a JSHost. Response: {res_code}: {res_text}'.format(
res_code=res.status_code,
res_text=res.text,
)
)
return res.json()
|
[
"def",
"stop_host",
"(",
"self",
",",
"config_file",
")",
":",
"res",
"=",
"self",
".",
"send_json_request",
"(",
"'host/stop'",
",",
"data",
"=",
"{",
"'config'",
":",
"config_file",
"}",
")",
"if",
"res",
".",
"status_code",
"!=",
"200",
":",
"raise",
"UnexpectedResponse",
"(",
"'Attempted to stop a JSHost. Response: {res_code}: {res_text}'",
".",
"format",
"(",
"res_code",
"=",
"res",
".",
"status_code",
",",
"res_text",
"=",
"res",
".",
"text",
",",
")",
")",
"return",
"res",
".",
"json",
"(",
")"
] |
Stops a managed host specified by `config_file`.
|
[
"Stops",
"a",
"managed",
"host",
"specified",
"by",
"config_file",
"."
] |
python
|
train
|
zsethna/OLGA
|
olga/load_model.py
|
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/load_model.py#L499-L540
|
def read_igor_V_gene_parameters(params_file_name):
"""Load raw genV from file.
genV is a list of genomic V information. Each element is a list of three
elements. The first is the name of the V allele, the second is the genomic
sequence trimmed to the CDR3 region for productive sequences, and the last
is the full germline sequence. For this 'raw genV' the middle element is an
empty string to be filled in later.
Parameters
----------
params_file_name : str
File name for an IGOR parameter file.
Returns
-------
genV : list
List of genomic V information.
"""
params_file = open(params_file_name, 'r')
V_gene_info = {}
in_V_gene_sec = False
for line in params_file:
if line.startswith('#GeneChoice;V_gene;'):
in_V_gene_sec = True
elif in_V_gene_sec:
if line[0] == '%':
split_line = line[1:].split(';')
V_gene_info[split_line[0]] = [split_line[1] , int(split_line[2])]
else:
break
params_file.close()
genV = [[]]*len(V_gene_info.keys())
for V_gene in V_gene_info.keys():
genV[V_gene_info[V_gene][1]] = [V_gene, '', V_gene_info[V_gene][0]]
return genV
|
[
"def",
"read_igor_V_gene_parameters",
"(",
"params_file_name",
")",
":",
"params_file",
"=",
"open",
"(",
"params_file_name",
",",
"'r'",
")",
"V_gene_info",
"=",
"{",
"}",
"in_V_gene_sec",
"=",
"False",
"for",
"line",
"in",
"params_file",
":",
"if",
"line",
".",
"startswith",
"(",
"'#GeneChoice;V_gene;'",
")",
":",
"in_V_gene_sec",
"=",
"True",
"elif",
"in_V_gene_sec",
":",
"if",
"line",
"[",
"0",
"]",
"==",
"'%'",
":",
"split_line",
"=",
"line",
"[",
"1",
":",
"]",
".",
"split",
"(",
"';'",
")",
"V_gene_info",
"[",
"split_line",
"[",
"0",
"]",
"]",
"=",
"[",
"split_line",
"[",
"1",
"]",
",",
"int",
"(",
"split_line",
"[",
"2",
"]",
")",
"]",
"else",
":",
"break",
"params_file",
".",
"close",
"(",
")",
"genV",
"=",
"[",
"[",
"]",
"]",
"*",
"len",
"(",
"V_gene_info",
".",
"keys",
"(",
")",
")",
"for",
"V_gene",
"in",
"V_gene_info",
".",
"keys",
"(",
")",
":",
"genV",
"[",
"V_gene_info",
"[",
"V_gene",
"]",
"[",
"1",
"]",
"]",
"=",
"[",
"V_gene",
",",
"''",
",",
"V_gene_info",
"[",
"V_gene",
"]",
"[",
"0",
"]",
"]",
"return",
"genV"
] |
Load raw genV from file.
genV is a list of genomic V information. Each element is a list of three
elements. The first is the name of the V allele, the second is the genomic
sequence trimmed to the CDR3 region for productive sequences, and the last
is the full germline sequence. For this 'raw genV' the middle element is an
empty string to be filled in later.
Parameters
----------
params_file_name : str
File name for an IGOR parameter file.
Returns
-------
genV : list
List of genomic V information.
|
[
"Load",
"raw",
"genV",
"from",
"file",
".",
"genV",
"is",
"a",
"list",
"of",
"genomic",
"V",
"information",
".",
"Each",
"element",
"is",
"a",
"list",
"of",
"three",
"elements",
".",
"The",
"first",
"is",
"the",
"name",
"of",
"the",
"V",
"allele",
"the",
"second",
"is",
"the",
"genomic",
"sequence",
"trimmed",
"to",
"the",
"CDR3",
"region",
"for",
"productive",
"sequences",
"and",
"the",
"last",
"is",
"the",
"full",
"germline",
"sequence",
".",
"For",
"this",
"raw",
"genV",
"the",
"middle",
"element",
"is",
"an",
"empty",
"string",
"to",
"be",
"filled",
"in",
"later",
"."
] |
python
|
train
|
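A hypothetical fragment of the IGOR section format the parser walks: `%`-prefixed `name;sequence;index` lines after a `#GeneChoice;V_gene;` header. A self-contained sketch of the same loop (file contents invented for illustration):

import io

params_text = (
    "#GeneChoice;V_gene;...\n"
    "%TRBV1*01;GATACA;1\n"
    "%TRBV2*01;CATTAG;0\n"
    "#GeneChoice;J_gene;...\n"
)
V_gene_info, in_sec = {}, False
for line in io.StringIO(params_text):
    if line.startswith('#GeneChoice;V_gene;'):
        in_sec = True
    elif in_sec:
        if line[0] == '%':
            name, seq, idx = line[1:].rstrip().split(';')
            V_gene_info[name] = [seq, int(idx)]
        else:
            break
print(V_gene_info)   # {'TRBV1*01': ['GATACA', 1], 'TRBV2*01': ['CATTAG', 0]}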
glue-viz/glue-vispy-viewers
|
glue_vispy_viewers/extern/vispy/gloo/gl/__init__.py
|
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/gl/__init__.py#L65-L75
|
def _arg_repr(self, arg):
""" Get a useful (and not too large) represetation of an argument.
"""
r = repr(arg)
max = 40
if len(r) > max:
if hasattr(arg, 'shape'):
r = 'array:' + 'x'.join([repr(s) for s in arg.shape])
else:
r = r[:max-3] + '...'
return r
|
[
"def",
"_arg_repr",
"(",
"self",
",",
"arg",
")",
":",
"r",
"=",
"repr",
"(",
"arg",
")",
"max",
"=",
"40",
"if",
"len",
"(",
"r",
")",
">",
"max",
":",
"if",
"hasattr",
"(",
"arg",
",",
"'shape'",
")",
":",
"r",
"=",
"'array:'",
"+",
"'x'",
".",
"join",
"(",
"[",
"repr",
"(",
"s",
")",
"for",
"s",
"in",
"arg",
".",
"shape",
"]",
")",
"else",
":",
"r",
"=",
"r",
"[",
":",
"max",
"-",
"3",
"]",
"+",
"'...'",
"return",
"r"
] |
Get a useful (and not too large) representation of an argument.
|
[
"Get",
"a",
"useful",
"(",
"and",
"not",
"too",
"large",
")",
"represetation",
"of",
"an",
"argument",
"."
] |
python
|
train
|
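The truncation rule is easy to exercise standalone (a sketch; numpy is only needed to hit the shape branch):

import numpy as np

def arg_repr(arg, max_len=40):
    # repr, shortened: arrays collapse to their shape, long reprs get '...'.
    r = repr(arg)
    if len(r) > max_len:
        if hasattr(arg, 'shape'):
            r = 'array:' + 'x'.join(repr(s) for s in arg.shape)
        else:
            r = r[:max_len - 3] + '...'
    return r

print(arg_repr("short"))              # 'short'
print(arg_repr(list(range(100))))     # first 37 chars of the repr + '...'
print(arg_repr(np.zeros((3, 4))))     # array:3x4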
inspirehep/harvesting-kit
|
harvestingkit/inspire_cds_package/from_inspire.py
|
https://github.com/inspirehep/harvesting-kit/blob/33a7f8aa9dade1d863110c6d8b27dfd955cb471f/harvestingkit/inspire_cds_package/from_inspire.py#L257-L266
|
def update_hidden_notes(self):
"""Remove hidden notes and tag a CERN if detected."""
if not self.tag_as_cern:
notes = record_get_field_instances(self.record,
tag="595")
for field in notes:
for dummy, value in field[0]:
if value == "CDS":
self.tag_as_cern = True
record_delete_fields(self.record, tag="595")
|
[
"def",
"update_hidden_notes",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"tag_as_cern",
":",
"notes",
"=",
"record_get_field_instances",
"(",
"self",
".",
"record",
",",
"tag",
"=",
"\"595\"",
")",
"for",
"field",
"in",
"notes",
":",
"for",
"dummy",
",",
"value",
"in",
"field",
"[",
"0",
"]",
":",
"if",
"value",
"==",
"\"CDS\"",
":",
"self",
".",
"tag_as_cern",
"=",
"True",
"record_delete_fields",
"(",
"self",
".",
"record",
",",
"tag",
"=",
"\"595\"",
")"
] |
Remove hidden notes and tag as CERN if detected.
|
[
"Remove",
"hidden",
"notes",
"and",
"tag",
"a",
"CERN",
"if",
"detected",
"."
] |
python
|
valid
|
explosion/spaCy
|
spacy/displacy/__init__.py
|
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/__init__.py#L64-L102
|
def serve(
docs,
style="dep",
page=True,
minify=False,
options={},
manual=False,
port=5000,
host="0.0.0.0",
):
"""Serve displaCy visualisation.
docs (list or Doc): Document(s) to visualise.
style (unicode): Visualisation style, 'dep' or 'ent'.
page (bool): Render markup as full HTML page.
minify (bool): Minify HTML markup.
options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
port (int): Port to serve visualisation.
host (unicode): Host to serve visualisation.
DOCS: https://spacy.io/api/top-level#displacy.serve
USAGE: https://spacy.io/usage/visualizers
"""
from wsgiref import simple_server
if is_in_jupyter():
user_warning(Warnings.W011)
render(docs, style=style, page=page, minify=minify, options=options, manual=manual)
httpd = simple_server.make_server(host, port, app)
print("\nUsing the '{}' visualizer".format(style))
print("Serving on http://{}:{} ...\n".format(host, port))
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("Shutting down server on port {}.".format(port))
finally:
httpd.server_close()
|
[
"def",
"serve",
"(",
"docs",
",",
"style",
"=",
"\"dep\"",
",",
"page",
"=",
"True",
",",
"minify",
"=",
"False",
",",
"options",
"=",
"{",
"}",
",",
"manual",
"=",
"False",
",",
"port",
"=",
"5000",
",",
"host",
"=",
"\"0.0.0.0\"",
",",
")",
":",
"from",
"wsgiref",
"import",
"simple_server",
"if",
"is_in_jupyter",
"(",
")",
":",
"user_warning",
"(",
"Warnings",
".",
"W011",
")",
"render",
"(",
"docs",
",",
"style",
"=",
"style",
",",
"page",
"=",
"page",
",",
"minify",
"=",
"minify",
",",
"options",
"=",
"options",
",",
"manual",
"=",
"manual",
")",
"httpd",
"=",
"simple_server",
".",
"make_server",
"(",
"host",
",",
"port",
",",
"app",
")",
"print",
"(",
"\"\\nUsing the '{}' visualizer\"",
".",
"format",
"(",
"style",
")",
")",
"print",
"(",
"\"Serving on http://{}:{} ...\\n\"",
".",
"format",
"(",
"host",
",",
"port",
")",
")",
"try",
":",
"httpd",
".",
"serve_forever",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"\"Shutting down server on port {}.\"",
".",
"format",
"(",
"port",
")",
")",
"finally",
":",
"httpd",
".",
"server_close",
"(",
")"
] |
Serve displaCy visualisation.
docs (list or Doc): Document(s) to visualise.
style (unicode): Visualisation style, 'dep' or 'ent'.
page (bool): Render markup as full HTML page.
minify (bool): Minify HTML markup.
options (dict): Visualiser-specific options, e.g. colors.
manual (bool): Don't parse `Doc` and instead expect a dict/list of dicts.
port (int): Port to serve visualisation.
host (unicode): Host to serve visualisation.
DOCS: https://spacy.io/api/top-level#displacy.serve
USAGE: https://spacy.io/usage/visualizers
|
[
"Serve",
"displaCy",
"visualisation",
"."
] |
python
|
train
|
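Typical usage, per the linked spaCy docs (assumes a downloaded model such as en_core_web_sm):

import spacy
from spacy import displacy

nlp = spacy.load("en_core_web_sm")
doc = nlp("Autonomous cars shift insurance liability toward manufacturers.")
displacy.serve(doc, style="dep", port=5000)   # blocks until interrupted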
jonbretman/jinja-to-js
|
jinja_to_js/__init__.py
|
https://github.com/jonbretman/jinja-to-js/blob/0a784b10a83d37a3171c5797547e9fc460c51289/jinja_to_js/__init__.py#L424-L436
|
def _process_getattr(self, node, **kwargs):
"""
Processes a `GetAttr` node. e.g. {{ foo.bar }}
"""
with self._interpolation():
with self._python_bool_wrapper(**kwargs) as new_kwargs:
if is_loop_helper(node):
self._process_loop_helper(node, **new_kwargs)
else:
self._process_node(node.node, **new_kwargs)
self.output.write('.')
self.output.write(node.attr)
|
[
"def",
"_process_getattr",
"(",
"self",
",",
"node",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"self",
".",
"_interpolation",
"(",
")",
":",
"with",
"self",
".",
"_python_bool_wrapper",
"(",
"*",
"*",
"kwargs",
")",
"as",
"new_kwargs",
":",
"if",
"is_loop_helper",
"(",
"node",
")",
":",
"self",
".",
"_process_loop_helper",
"(",
"node",
",",
"*",
"*",
"new_kwargs",
")",
"else",
":",
"self",
".",
"_process_node",
"(",
"node",
".",
"node",
",",
"*",
"*",
"new_kwargs",
")",
"self",
".",
"output",
".",
"write",
"(",
"'.'",
")",
"self",
".",
"output",
".",
"write",
"(",
"node",
".",
"attr",
")"
] |
Processes a `GetAttr` node. e.g. {{ foo.bar }}
|
[
"Processes",
"a",
"GetAttr",
"node",
".",
"e",
".",
"g",
".",
"{{",
"foo",
".",
"bar",
"}}"
] |
python
|
train
|
fstab50/metal
|
metal/cli.py
|
https://github.com/fstab50/metal/blob/0488bbdd516a508909267cc44191f632e21156ba/metal/cli.py#L123-L138
|
def set(self, mode, disable):
""" create logger object, enable or disable logging """
global logger
try:
if logger:
if disable:
logger.disabled = True
else:
if mode in ('STREAM', 'FILE'):
logger = logd.getLogger(mode, __version__)
except Exception as e:
logger.exception(
'%s: Problem incurred during logging setup' % inspect.stack()[0][3]
)
return False
return True
|
[
"def",
"set",
"(",
"self",
",",
"mode",
",",
"disable",
")",
":",
"global",
"logger",
"try",
":",
"if",
"logger",
":",
"if",
"disable",
":",
"logger",
".",
"disabled",
"=",
"True",
"else",
":",
"if",
"mode",
"in",
"(",
"'STREAM'",
",",
"'FILE'",
")",
":",
"logger",
"=",
"logd",
".",
"getLogger",
"(",
"mode",
",",
"__version__",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"'%s: Problem incurred during logging setup'",
"%",
"inspect",
".",
"stack",
"(",
")",
"[",
"0",
"]",
"[",
"3",
"]",
")",
"return",
"False",
"return",
"True"
] |
create logger object, enable or disable logging
|
[
"create",
"logger",
"object",
"enable",
"or",
"disable",
"logging"
] |
python
|
train
|
inspirehep/inspire-schemas
|
inspire_schemas/builders/authors.py
|
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/authors.py#L135-L156
|
def add_email_address(self, email, hidden=None):
"""Add email address.
Args:
:param email: email of the author.
:type email: string
    :param hidden: whether the email should be hidden (not public).
:type hidden: boolean
"""
existing_emails = get_value(self.obj, 'email_addresses', [])
found_email = next(
(existing_email for existing_email in existing_emails if existing_email.get('value') == email),
None
)
if found_email is None:
new_email = {'value': email}
if hidden is not None:
new_email['hidden'] = hidden
self._append_to('email_addresses', new_email)
elif hidden is not None:
found_email['hidden'] = hidden
|
[
"def",
"add_email_address",
"(",
"self",
",",
"email",
",",
"hidden",
"=",
"None",
")",
":",
"existing_emails",
"=",
"get_value",
"(",
"self",
".",
"obj",
",",
"'email_addresses'",
",",
"[",
"]",
")",
"found_email",
"=",
"next",
"(",
"(",
"existing_email",
"for",
"existing_email",
"in",
"existing_emails",
"if",
"existing_email",
".",
"get",
"(",
"'value'",
")",
"==",
"email",
")",
",",
"None",
")",
"if",
"found_email",
"is",
"None",
":",
"new_email",
"=",
"{",
"'value'",
":",
"email",
"}",
"if",
"hidden",
"is",
"not",
"None",
":",
"new_email",
"[",
"'hidden'",
"]",
"=",
"hidden",
"self",
".",
"_append_to",
"(",
"'email_addresses'",
",",
"new_email",
")",
"elif",
"hidden",
"is",
"not",
"None",
":",
"found_email",
"[",
"'hidden'",
"]",
"=",
"hidden"
] |
Add email address.
Args:
:param email: email of the author.
:type email: string
:param hidden: whether the email should be hidden (not public).
:type hidden: boolean
|
[
"Add",
"email",
"address",
"."
] |
python
|
train
|
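The dedup-then-update behaviour is the interesting part: adding an address twice only touches its `hidden` flag instead of appending a duplicate. A standalone sketch of that logic on a plain list of dicts:

def add_email(emails, email, hidden=None):
    # Mirror of add_email_address without the record-builder plumbing.
    found = next((e for e in emails if e.get('value') == email), None)
    if found is None:
        new_email = {'value': email}
        if hidden is not None:
            new_email['hidden'] = hidden
        emails.append(new_email)
    elif hidden is not None:
        found['hidden'] = hidden

emails = []
add_email(emails, 'jane@example.org')
add_email(emails, 'jane@example.org', hidden=True)
print(emails)   # [{'value': 'jane@example.org', 'hidden': True}]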
brainiak/brainiak
|
brainiak/factoranalysis/htfa.py
|
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/htfa.py#L811-L841
|
def fit(self, X, R):
"""Compute Hierarchical Topographical Factor Analysis Model
[Manning2014-1][Manning2014-2]
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
"""
self._check_input(X, R)
if self.verbose:
logger.info("Start to fit HTFA")
self.n_dim = R[0].shape[1]
self.cov_vec_size = np.sum(np.arange(self.n_dim) + 1)
# centers,widths
self.prior_size = self.K * (self.n_dim + 1)
# centers,widths,centerCov,widthVar
self.prior_bcast_size =\
self.K * (self.n_dim + 2 + self.cov_vec_size)
self.get_map_offset()
self._fit_htfa(X, R)
return self
|
[
"def",
"fit",
"(",
"self",
",",
"X",
",",
"R",
")",
":",
"self",
".",
"_check_input",
"(",
"X",
",",
"R",
")",
"if",
"self",
".",
"verbose",
":",
"logger",
".",
"info",
"(",
"\"Start to fit HTFA\"",
")",
"self",
".",
"n_dim",
"=",
"R",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
"]",
"self",
".",
"cov_vec_size",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"arange",
"(",
"self",
".",
"n_dim",
")",
"+",
"1",
")",
"# centers,widths",
"self",
".",
"prior_size",
"=",
"self",
".",
"K",
"*",
"(",
"self",
".",
"n_dim",
"+",
"1",
")",
"# centers,widths,centerCov,widthVar",
"self",
".",
"prior_bcast_size",
"=",
"self",
".",
"K",
"*",
"(",
"self",
".",
"n_dim",
"+",
"2",
"+",
"self",
".",
"cov_vec_size",
")",
"self",
".",
"get_map_offset",
"(",
")",
"self",
".",
"_fit_htfa",
"(",
"X",
",",
"R",
")",
"return",
"self"
] |
Compute Hierarchical Topographical Factor Analysis Model
[Manning2014-1][Manning2014-2]
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
R : list of 2D arrays, element i has shape=[n_voxel, n_dim]
Each element in the list contains the scanner coordinate matrix
of fMRI data of one subject.
Returns
-------
HTFA
Returns the instance itself.
|
[
"Compute",
"Hierarchical",
"Topographical",
"Factor",
"Analysis",
"Model",
"[",
"Manning2014",
"-",
"1",
"]",
"[",
"Manning2014",
"-",
"2",
"]"
] |
python
|
train
|
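The prior sizes above are simple closed forms: `cov_vec_size` is the triangular number n_dim(n_dim+1)/2, i.e. the count of unique entries in a symmetric covariance matrix. A quick check with illustrative values:

import numpy as np

n_dim, K = 3, 10
cov_vec_size = np.sum(np.arange(n_dim) + 1)        # 1 + 2 + 3 = 6
assert cov_vec_size == n_dim * (n_dim + 1) // 2
prior_size = K * (n_dim + 1)                       # centers + widths
prior_bcast_size = K * (n_dim + 2 + cov_vec_size)  # + centerCov + widthVar
print(cov_vec_size, prior_size, prior_bcast_size)  # 6 40 110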
googleapis/google-cloud-python
|
bigtable/google/cloud/bigtable/table.py
|
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/table.py#L337-L365
|
def read_row(self, row_key, filter_=None):
"""Read a single row from this table.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_read_row]
:end-before: [END bigtable_read_row]
:type row_key: bytes
:param row_key: The key of the row to read from.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
row. If unset, returns the entire row.
:rtype: :class:`.PartialRowData`, :data:`NoneType <types.NoneType>`
:returns: The contents of the row if any chunks were returned in
the response, otherwise :data:`None`.
:raises: :class:`ValueError <exceptions.ValueError>` if a commit row
chunk is never encountered.
"""
row_set = RowSet()
row_set.add_row_key(row_key)
result_iter = iter(self.read_rows(filter_=filter_, row_set=row_set))
row = next(result_iter, None)
if next(result_iter, None) is not None:
raise ValueError("More than one row was returned.")
return row
|
[
"def",
"read_row",
"(",
"self",
",",
"row_key",
",",
"filter_",
"=",
"None",
")",
":",
"row_set",
"=",
"RowSet",
"(",
")",
"row_set",
".",
"add_row_key",
"(",
"row_key",
")",
"result_iter",
"=",
"iter",
"(",
"self",
".",
"read_rows",
"(",
"filter_",
"=",
"filter_",
",",
"row_set",
"=",
"row_set",
")",
")",
"row",
"=",
"next",
"(",
"result_iter",
",",
"None",
")",
"if",
"next",
"(",
"result_iter",
",",
"None",
")",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"More than one row was returned.\"",
")",
"return",
"row"
] |
Read a single row from this table.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_read_row]
:end-before: [END bigtable_read_row]
:type row_key: bytes
:param row_key: The key of the row to read from.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
row. If unset, returns the entire row.
:rtype: :class:`.PartialRowData`, :data:`NoneType <types.NoneType>`
:returns: The contents of the row if any chunks were returned in
the response, otherwise :data:`None`.
:raises: :class:`ValueError <exceptions.ValueError>` if a commit row
chunk is never encountered.
|
[
"Read",
"a",
"single",
"row",
"from",
"this",
"table",
"."
] |
python
|
train
|
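The `next(iterator, None)` pairing is what enforces the at-most-one-row contract; a client-free sketch of the same guard:

result_iter = iter([{'row_key': b'r1'}])   # stand-in for read_rows() output
row = next(result_iter, None)              # the row, or None if empty
if next(result_iter, None) is not None:    # any second item is an error
    raise ValueError("More than one row was returned.")
print(row)   # {'row_key': b'r1'}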
tk0miya/tk.phpautodoc
|
src/phply/phpparse.py
|
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L328-L333
|
def p_new_else_single(p):
'''new_else_single : empty
| ELSE COLON inner_statement_list'''
if len(p) == 4:
p[0] = ast.Else(ast.Block(p[3], lineno=p.lineno(2)),
lineno=p.lineno(1))
|
[
"def",
"p_new_else_single",
"(",
"p",
")",
":",
"if",
"len",
"(",
"p",
")",
"==",
"4",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"Else",
"(",
"ast",
".",
"Block",
"(",
"p",
"[",
"3",
"]",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"2",
")",
")",
",",
"lineno",
"=",
"p",
".",
"lineno",
"(",
"1",
")",
")"
] |
new_else_single : empty
| ELSE COLON inner_statement_list
|
[
"new_else_single",
":",
"empty",
"|",
"ELSE",
"COLON",
"inner_statement_list"
] |
python
|
train
|
ryanjdillon/pylleo
|
pylleo/lleoio.py
|
https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleoio.py#L106-L240
|
def read_data(meta, path_dir, sample_f=1, decimate=False, overwrite=False):
'''Read accelerometry data from leonardo txt files
Args
----
meta: dict
Dictionary of meta data from header lines of lleo data files
path_dir: str
Parent directory containing lleo data files
sample_f: int
Return every `sample_f` data points
    Returns
    -------
    data_df: pandas.DataFrame
        Single dataframe with a `datetimes` column and one column per tag
        parameter (acceleration axes [m/s^2], depth [m], propeller speed,
        temperature), subsampled to every `sample_f`-th row
'''
import os
import pandas
from . import utils
def _generate_datetimes(date, time, interval_s, n_timestamps):
'''Generate list of datetimes from date/time with given interval'''
from datetime import datetime, timedelta
import pandas
# TODO problematic if both m/d d/m options
fmts = ['%Y/%m/%d %H%M%S',
'%d/%m/%Y %H%M%S',
'%m/%d/%Y %I%M%S %p',
'%d/%m/%Y %I%M%S %p',]
for fmt in fmts:
try:
start = pandas.to_datetime('{} {}'.format(date,time), format=fmt)
except:
print('Date format {:18} incorrect, '
'trying next...'.format(fmt))
else:
print('Date format {:18} correct.'.format(fmt))
break
# Create datetime array
datetimes = list()
for i in range(n_timestamps):
secs = interval_s*i
datetimes.append(start + timedelta(seconds=secs))
return datetimes
def _read_data_file(meta, path_dir, param_str):
'''Read single Little Leonardo txt data file'''
import numpy
import os
import pandas
from . import utils
# Get path of data file and associated pickle file
path_file = utils.find_file(path_dir, param_str, '.TXT')
col_name = utils.posix_string(param_str)
# Get number of header rows in file
enc = utils.predict_encoding(path_file, n_lines=20)
with open(path_file, 'r', encoding=enc) as f:
n_header = utils.get_n_header(f)
print('\nReading: {}'.format(col_name))
data = numpy.genfromtxt(path_file, skip_header=n_header)
interval_s = float(meta['parameters'][col_name]['Interval(Sec)'])
date = meta['parameters'][col_name]['Start date']
time = meta['parameters'][col_name]['Start time']
# TODO review
# Generate summed data if propeller sampling rate not 1
if (col_name == 'propeller') and (interval_s < 1):
            print('Sampling interval too short, taking sums')
# Sampling rate
fs = int(1/interval_s)
print('data before', data.max())
# Drop elements to make divisible by fs for summing
data = data[:-int(len(data)%fs)]
# Reshape to 2D with columns `fs` in length to be summed
data = data.reshape(fs, int(len(data)/fs))
data = numpy.sum(data, axis=0)
interval_s = 1
print('data after', data.max())
datetimes = _generate_datetimes(date, time, interval_s, len(data))
data = numpy.vstack((datetimes, data)).T
df = pandas.DataFrame(data, columns=['datetimes', col_name])
return df
# Get list of string parameter names for tag model
param_names = utils.get_tag_params(meta['tag_model'])
    # Path of pickle file to load if it already exists
pickle_file = os.path.join(path_dir, 'pydata_'+meta['experiment']+'.p')
# Load or create pandas DataFrame with parameters associated with tag model
if (os.path.exists(pickle_file)) and (overwrite is not True):
data_df = pandas.read_pickle(pickle_file)
else:
first_col = True
for name in param_names:
next_df = _read_data_file(meta, path_dir, name)
if first_col == False:
data_df = pandas.merge(data_df, next_df, on='datetimes', how='left')
else:
data_df = next_df
first_col = False
print('')
        # Convert columns to `datetime64` or `float64` types
data_df = data_df.apply(lambda x: pandas.to_numeric(x, errors='ignore'))
# Save file to pickle
data_df.to_pickle(pickle_file)
    # Return every `sample_f`-th row of the DataFrame
return data_df.iloc[::sample_f,:]
|
[
"def",
"read_data",
"(",
"meta",
",",
"path_dir",
",",
"sample_f",
"=",
"1",
",",
"decimate",
"=",
"False",
",",
"overwrite",
"=",
"False",
")",
":",
"import",
"os",
"import",
"pandas",
"from",
".",
"import",
"utils",
"def",
"_generate_datetimes",
"(",
"date",
",",
"time",
",",
"interval_s",
",",
"n_timestamps",
")",
":",
"'''Generate list of datetimes from date/time with given interval'''",
"from",
"datetime",
"import",
"datetime",
",",
"timedelta",
"import",
"pandas",
"# TODO problematic if both m/d d/m options",
"fmts",
"=",
"[",
"'%Y/%m/%d %H%M%S'",
",",
"'%d/%m/%Y %H%M%S'",
",",
"'%m/%d/%Y %I%M%S %p'",
",",
"'%d/%m/%Y %I%M%S %p'",
",",
"]",
"for",
"fmt",
"in",
"fmts",
":",
"try",
":",
"start",
"=",
"pandas",
".",
"to_datetime",
"(",
"'{} {}'",
".",
"format",
"(",
"date",
",",
"time",
")",
",",
"format",
"=",
"fmt",
")",
"except",
":",
"print",
"(",
"'Date format {:18} incorrect, '",
"'trying next...'",
".",
"format",
"(",
"fmt",
")",
")",
"else",
":",
"print",
"(",
"'Date format {:18} correct.'",
".",
"format",
"(",
"fmt",
")",
")",
"break",
"# Create datetime array",
"datetimes",
"=",
"list",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"n_timestamps",
")",
":",
"secs",
"=",
"interval_s",
"*",
"i",
"datetimes",
".",
"append",
"(",
"start",
"+",
"timedelta",
"(",
"seconds",
"=",
"secs",
")",
")",
"return",
"datetimes",
"def",
"_read_data_file",
"(",
"meta",
",",
"path_dir",
",",
"param_str",
")",
":",
"'''Read single Little Leonardo txt data file'''",
"import",
"numpy",
"import",
"os",
"import",
"pandas",
"from",
".",
"import",
"utils",
"# Get path of data file and associated pickle file",
"path_file",
"=",
"utils",
".",
"find_file",
"(",
"path_dir",
",",
"param_str",
",",
"'.TXT'",
")",
"col_name",
"=",
"utils",
".",
"posix_string",
"(",
"param_str",
")",
"# Get number of header rows in file",
"enc",
"=",
"utils",
".",
"predict_encoding",
"(",
"path_file",
",",
"n_lines",
"=",
"20",
")",
"with",
"open",
"(",
"path_file",
",",
"'r'",
",",
"encoding",
"=",
"enc",
")",
"as",
"f",
":",
"n_header",
"=",
"utils",
".",
"get_n_header",
"(",
"f",
")",
"print",
"(",
"'\\nReading: {}'",
".",
"format",
"(",
"col_name",
")",
")",
"data",
"=",
"numpy",
".",
"genfromtxt",
"(",
"path_file",
",",
"skip_header",
"=",
"n_header",
")",
"interval_s",
"=",
"float",
"(",
"meta",
"[",
"'parameters'",
"]",
"[",
"col_name",
"]",
"[",
"'Interval(Sec)'",
"]",
")",
"date",
"=",
"meta",
"[",
"'parameters'",
"]",
"[",
"col_name",
"]",
"[",
"'Start date'",
"]",
"time",
"=",
"meta",
"[",
"'parameters'",
"]",
"[",
"col_name",
"]",
"[",
"'Start time'",
"]",
"# TODO review",
"# Generate summed data if propeller sampling rate not 1",
"if",
"(",
"col_name",
"==",
"'propeller'",
")",
"and",
"(",
"interval_s",
"<",
"1",
")",
":",
"print",
"(",
"'Too high sampling interval, taking sums'",
")",
"# Sampling rate",
"fs",
"=",
"int",
"(",
"1",
"/",
"interval_s",
")",
"print",
"(",
"'data before'",
",",
"data",
".",
"max",
"(",
")",
")",
"# Drop elements to make divisible by fs for summing",
"data",
"=",
"data",
"[",
":",
"-",
"int",
"(",
"len",
"(",
"data",
")",
"%",
"fs",
")",
"]",
"# Reshape to 2D with columns `fs` in length to be summed",
"data",
"=",
"data",
".",
"reshape",
"(",
"fs",
",",
"int",
"(",
"len",
"(",
"data",
")",
"/",
"fs",
")",
")",
"data",
"=",
"numpy",
".",
"sum",
"(",
"data",
",",
"axis",
"=",
"0",
")",
"interval_s",
"=",
"1",
"print",
"(",
"'data after'",
",",
"data",
".",
"max",
"(",
")",
")",
"datetimes",
"=",
"_generate_datetimes",
"(",
"date",
",",
"time",
",",
"interval_s",
",",
"len",
"(",
"data",
")",
")",
"data",
"=",
"numpy",
".",
"vstack",
"(",
"(",
"datetimes",
",",
"data",
")",
")",
".",
"T",
"df",
"=",
"pandas",
".",
"DataFrame",
"(",
"data",
",",
"columns",
"=",
"[",
"'datetimes'",
",",
"col_name",
"]",
")",
"return",
"df",
"# Get list of string parameter names for tag model",
"param_names",
"=",
"utils",
".",
"get_tag_params",
"(",
"meta",
"[",
"'tag_model'",
"]",
")",
"# Load pickle file exists and code unchanged",
"pickle_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_dir",
",",
"'pydata_'",
"+",
"meta",
"[",
"'experiment'",
"]",
"+",
"'.p'",
")",
"# Load or create pandas DataFrame with parameters associated with tag model",
"if",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"pickle_file",
")",
")",
"and",
"(",
"overwrite",
"is",
"not",
"True",
")",
":",
"data_df",
"=",
"pandas",
".",
"read_pickle",
"(",
"pickle_file",
")",
"else",
":",
"first_col",
"=",
"True",
"for",
"name",
"in",
"param_names",
":",
"next_df",
"=",
"_read_data_file",
"(",
"meta",
",",
"path_dir",
",",
"name",
")",
"if",
"first_col",
"==",
"False",
":",
"data_df",
"=",
"pandas",
".",
"merge",
"(",
"data_df",
",",
"next_df",
",",
"on",
"=",
"'datetimes'",
",",
"how",
"=",
"'left'",
")",
"else",
":",
"data_df",
"=",
"next_df",
"first_col",
"=",
"False",
"print",
"(",
"''",
")",
"# Covert columns to `datetime64` or `float64` types",
"data_df",
"=",
"data_df",
".",
"apply",
"(",
"lambda",
"x",
":",
"pandas",
".",
"to_numeric",
"(",
"x",
",",
"errors",
"=",
"'ignore'",
")",
")",
"# Save file to pickle",
"data_df",
".",
"to_pickle",
"(",
"pickle_file",
")",
"# Return DataFrame with ever `sample_f` values",
"return",
"data_df",
".",
"iloc",
"[",
":",
":",
"sample_f",
",",
":",
"]"
] |
Read accelerometry data from leonardo txt files
Args
----
meta: dict
Dictionary of meta data from header lines of lleo data files
path_dir: str
Parent directory containing lleo data files
sample_f: int
Return every `sample_f` data points
Returns
-------
data_df: pandas.DataFrame
    Single dataframe with a `datetimes` column and one column per tag
    parameter (acceleration axes [m/s^2], depth [m], propeller speed,
    temperature), subsampled to every `sample_f`-th row
|
[
"Read",
"accelerometry",
"data",
"from",
"leonardo",
"txt",
"files"
] |
python
|
train
|
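The propeller down-summing step is worth exercising in isolation. Note that the C-order `reshape(fs, len/fs)` makes each output bin the sum of samples spaced `len(data)//fs` apart, not of consecutive samples, which may or may not be the intended binning (values below are invented for illustration):

import numpy as np

interval_s = 0.25                       # hypothetical 4 Hz propeller data
data = np.arange(10, dtype=float)       # 10 samples; not divisible by fs
fs = int(1 / interval_s)                # 4
data = data[:-int(len(data) % fs)]      # trim ragged tail -> 8 samples
data = data.reshape(fs, int(len(data) / fs))
print(np.sum(data, axis=0))             # [12. 16.]: sums of strided samples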
Zsailer/phylopandas
|
phylopandas/seqio/read.py
|
https://github.com/Zsailer/phylopandas/blob/f163c4a2b9369eb32f6c8f3793f711f6fe4e6130/phylopandas/seqio/read.py#L119-L139
|
def _read_function(schema):
"""Add a write method for named schema to a class.
"""
def func(
filename,
seq_label='sequence',
alphabet=None,
use_uids=True,
**kwargs):
        # Use generic read class to read data.
return _read(
filename=filename,
schema=schema,
seq_label=seq_label,
alphabet=alphabet,
use_uids=use_uids,
**kwargs
)
# Update docs
func.__doc__ = _read_doc_template(schema)
return func
|
[
"def",
"_read_function",
"(",
"schema",
")",
":",
"def",
"func",
"(",
"filename",
",",
"seq_label",
"=",
"'sequence'",
",",
"alphabet",
"=",
"None",
",",
"use_uids",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# Use generic write class to write data.",
"return",
"_read",
"(",
"filename",
"=",
"filename",
",",
"schema",
"=",
"schema",
",",
"seq_label",
"=",
"seq_label",
",",
"alphabet",
"=",
"alphabet",
",",
"use_uids",
"=",
"use_uids",
",",
"*",
"*",
"kwargs",
")",
"# Update docs",
"func",
".",
"__doc__",
"=",
"_read_doc_template",
"(",
"schema",
")",
"return",
"func"
] |
Add a read method for named schema to a class.
|
[
"Add",
"a",
"write",
"method",
"for",
"named",
"schema",
"to",
"a",
"class",
"."
] |
python
|
train
|
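The same closure-factory shape with a hypothetical reader, showing how the per-schema function and its docstring are stamped out:

def make_reader(schema):
    def func(filename, **kwargs):
        # A stand-in for _read(filename=filename, schema=schema, ...).
        return 'read {} as {}'.format(filename, schema)
    func.__doc__ = 'Read a {} file into a DataFrame.'.format(schema)
    return func

read_fasta = make_reader('fasta')
print(read_fasta('seqs.fa'))     # read seqs.fa as fasta
print(read_fasta.__doc__)        # Read a fasta file into a DataFrame.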
saltstack/salt
|
salt/modules/virt.py
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L2215-L2255
|
def vm_state(vm_=None, **kwargs):
'''
Return list of all the vms and their state.
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.vm_state <domain>
'''
def _info(dom):
'''
Compute domain state
'''
state = ''
raw = dom.info()
state = VIRT_STATE_NAME_MAP.get(raw[0], 'unknown')
return state
info = {}
conn = __get_conn(**kwargs)
if vm_:
info[vm_] = _info(_get_domain(conn, vm_))
else:
for domain in _get_domain(conn, iterable=True):
info[domain.name()] = _info(domain)
conn.close()
return info
|
[
"def",
"vm_state",
"(",
"vm_",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"_info",
"(",
"dom",
")",
":",
"'''\n Compute domain state\n '''",
"state",
"=",
"''",
"raw",
"=",
"dom",
".",
"info",
"(",
")",
"state",
"=",
"VIRT_STATE_NAME_MAP",
".",
"get",
"(",
"raw",
"[",
"0",
"]",
",",
"'unknown'",
")",
"return",
"state",
"info",
"=",
"{",
"}",
"conn",
"=",
"__get_conn",
"(",
"*",
"*",
"kwargs",
")",
"if",
"vm_",
":",
"info",
"[",
"vm_",
"]",
"=",
"_info",
"(",
"_get_domain",
"(",
"conn",
",",
"vm_",
")",
")",
"else",
":",
"for",
"domain",
"in",
"_get_domain",
"(",
"conn",
",",
"iterable",
"=",
"True",
")",
":",
"info",
"[",
"domain",
".",
"name",
"(",
")",
"]",
"=",
"_info",
"(",
"domain",
")",
"conn",
".",
"close",
"(",
")",
"return",
"info"
] |
Return list of all the vms and their state.
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
:param vm_: name of the domain
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.vm_state <domain>
|
[
"Return",
"list",
"of",
"all",
"the",
"vms",
"and",
"their",
"state",
"."
] |
python
|
train
|
LIVVkit/LIVVkit
|
livvkit/components/performance.py
|
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L312-L354
|
def generate_scaling_plot(timing_data, title, ylabel, description, plot_file):
"""
Generate a scaling plot.
Args:
timing_data: data returned from a `*_scaling` method
title: the title of the plot
ylabel: the y-axis label of the plot
description: a description of the plot
plot_file: the file to write out to
Returns:
an image element containing the plot file and metadata
"""
proc_counts = timing_data['proc_counts']
if len(proc_counts) > 2:
plt.figure(figsize=(10, 8), dpi=150)
plt.title(title)
plt.xlabel("Number of processors")
plt.ylabel(ylabel)
for case, case_color in zip(['bench', 'model'], ['#91bfdb', '#fc8d59']):
case_data = timing_data[case]
means = case_data['means']
mins = case_data['mins']
maxs = case_data['maxs']
plt.fill_between(proc_counts, mins, maxs, facecolor=case_color, alpha=0.5)
plt.plot(proc_counts, means, 'o-', color=case_color, label=case)
plt.legend(loc='best')
else:
plt.figure(figsize=(5, 3))
plt.axis('off')
plt.text(0.4, 0.8, "ERROR:")
plt.text(0.0, 0.6, "Not enough data points to draw scaling plot")
plt.text(0.0, 0.44, "To generate this data rerun BATS with the")
plt.text(0.0, 0.36, "performance option enabled.")
if livvkit.publish:
plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)
plt.savefig(plot_file)
plt.close()
return elements.image(title, description, os.path.basename(plot_file))
|
[
"def",
"generate_scaling_plot",
"(",
"timing_data",
",",
"title",
",",
"ylabel",
",",
"description",
",",
"plot_file",
")",
":",
"proc_counts",
"=",
"timing_data",
"[",
"'proc_counts'",
"]",
"if",
"len",
"(",
"proc_counts",
")",
">",
"2",
":",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"10",
",",
"8",
")",
",",
"dpi",
"=",
"150",
")",
"plt",
".",
"title",
"(",
"title",
")",
"plt",
".",
"xlabel",
"(",
"\"Number of processors\"",
")",
"plt",
".",
"ylabel",
"(",
"ylabel",
")",
"for",
"case",
",",
"case_color",
"in",
"zip",
"(",
"[",
"'bench'",
",",
"'model'",
"]",
",",
"[",
"'#91bfdb'",
",",
"'#fc8d59'",
"]",
")",
":",
"case_data",
"=",
"timing_data",
"[",
"case",
"]",
"means",
"=",
"case_data",
"[",
"'means'",
"]",
"mins",
"=",
"case_data",
"[",
"'mins'",
"]",
"maxs",
"=",
"case_data",
"[",
"'maxs'",
"]",
"plt",
".",
"fill_between",
"(",
"proc_counts",
",",
"mins",
",",
"maxs",
",",
"facecolor",
"=",
"case_color",
",",
"alpha",
"=",
"0.5",
")",
"plt",
".",
"plot",
"(",
"proc_counts",
",",
"means",
",",
"'o-'",
",",
"color",
"=",
"case_color",
",",
"label",
"=",
"case",
")",
"plt",
".",
"legend",
"(",
"loc",
"=",
"'best'",
")",
"else",
":",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"5",
",",
"3",
")",
")",
"plt",
".",
"axis",
"(",
"'off'",
")",
"plt",
".",
"text",
"(",
"0.4",
",",
"0.8",
",",
"\"ERROR:\"",
")",
"plt",
".",
"text",
"(",
"0.0",
",",
"0.6",
",",
"\"Not enough data points to draw scaling plot\"",
")",
"plt",
".",
"text",
"(",
"0.0",
",",
"0.44",
",",
"\"To generate this data rerun BATS with the\"",
")",
"plt",
".",
"text",
"(",
"0.0",
",",
"0.36",
",",
"\"performance option enabled.\"",
")",
"if",
"livvkit",
".",
"publish",
":",
"plt",
".",
"savefig",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"plot_file",
")",
"[",
"0",
"]",
"+",
"'.eps'",
",",
"dpi",
"=",
"600",
")",
"plt",
".",
"savefig",
"(",
"plot_file",
")",
"plt",
".",
"close",
"(",
")",
"return",
"elements",
".",
"image",
"(",
"title",
",",
"description",
",",
"os",
".",
"path",
".",
"basename",
"(",
"plot_file",
")",
")"
] |
Generate a scaling plot.
Args:
timing_data: data returned from a `*_scaling` method
title: the title of the plot
ylabel: the y-axis label of the plot
description: a description of the plot
plot_file: the file to write out to
Returns:
an image element containing the plot file and metadata
|
[
"Generate",
"a",
"scaling",
"plot",
"."
] |
python
|
train
|
charettes/django-mutant
|
mutant/models/model/__init__.py
|
https://github.com/charettes/django-mutant/blob/865a1b712ce30501901c4691ce2110ab03f0f93b/mutant/models/model/__init__.py#L436-L460
|
def clean(self):
"""
Make sure the lookup makes sense
"""
if self.lookup == '?': # Randomly sort
return
else:
lookups = self.lookup.split(LOOKUP_SEP)
opts = self.model_def.model_class()._meta
valid = True
while len(lookups):
lookup = lookups.pop(0)
try:
field = opts.get_field(lookup)
except FieldDoesNotExist:
valid = False
else:
if isinstance(field, models.ForeignKey):
opts = get_remote_field_model(field)._meta
elif len(lookups): # Cannot go any deeper
valid = False
finally:
if not valid:
msg = _("This field doesn't exist")
raise ValidationError({'lookup': [msg]})
|
[
"def",
"clean",
"(",
"self",
")",
":",
"if",
"self",
".",
"lookup",
"==",
"'?'",
":",
"# Randomly sort",
"return",
"else",
":",
"lookups",
"=",
"self",
".",
"lookup",
".",
"split",
"(",
"LOOKUP_SEP",
")",
"opts",
"=",
"self",
".",
"model_def",
".",
"model_class",
"(",
")",
".",
"_meta",
"valid",
"=",
"True",
"while",
"len",
"(",
"lookups",
")",
":",
"lookup",
"=",
"lookups",
".",
"pop",
"(",
"0",
")",
"try",
":",
"field",
"=",
"opts",
".",
"get_field",
"(",
"lookup",
")",
"except",
"FieldDoesNotExist",
":",
"valid",
"=",
"False",
"else",
":",
"if",
"isinstance",
"(",
"field",
",",
"models",
".",
"ForeignKey",
")",
":",
"opts",
"=",
"get_remote_field_model",
"(",
"field",
")",
".",
"_meta",
"elif",
"len",
"(",
"lookups",
")",
":",
"# Cannot go any deeper",
"valid",
"=",
"False",
"finally",
":",
"if",
"not",
"valid",
":",
"msg",
"=",
"_",
"(",
"\"This field doesn't exist\"",
")",
"raise",
"ValidationError",
"(",
"{",
"'lookup'",
":",
"[",
"msg",
"]",
"}",
")"
] |
Make sure the lookup makes sense
|
[
"Make",
"sure",
"the",
"lookup",
"makes",
"sense"
] |
python
|
train
|
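The `clean` method above walks a lookup such as `author__name` one segment at a time, failing as soon as a segment is unknown or traversal can go no deeper. A framework-free sketch of that walk, using a plain dict in place of Django's `_meta` (the schema is invented):

```python
# Split the lookup on a separator, descend one step at a time, and
# reject the path when a segment is missing or a non-traversable value
# is reached before the lookup is exhausted.
LOOKUP_SEP = '__'

schema = {'author': {'name': str, 'email': str}, 'title': str}

def validate_lookup(lookup):
    lookups = lookup.split(LOOKUP_SEP)
    opts = schema
    while lookups:
        segment = lookups.pop(0)
        if not isinstance(opts, dict) or segment not in opts:
            raise ValueError("This field doesn't exist: %r" % lookup)
        opts = opts[segment]
    return True

print(validate_lookup('author__name'))  # True
try:
    validate_lookup('author__age')
except ValueError as exc:
    print(exc)
```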
ambitioninc/rabbitmq-admin
|
rabbitmq_admin/api.py
|
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L429-L460
|
def create_user_permission(self,
name,
vhost,
configure=None,
write=None,
read=None):
"""
Create a user permission
:param name: The user's name
:type name: str
:param vhost: The vhost to assign the permission to
:type vhost: str
:param configure: A regex for the user permission. Default is ``.*``
:type configure: str
:param write: A regex for the user permission. Default is ``.*``
:type write: str
:param read: A regex for the user permission. Default is ``.*``
:type read: str
"""
data = {
'configure': configure or '.*',
'write': write or '.*',
'read': read or '.*',
}
self._api_put(
'/api/permissions/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(name)
),
data=data
)
|
[
"def",
"create_user_permission",
"(",
"self",
",",
"name",
",",
"vhost",
",",
"configure",
"=",
"None",
",",
"write",
"=",
"None",
",",
"read",
"=",
"None",
")",
":",
"data",
"=",
"{",
"'configure'",
":",
"configure",
"or",
"'.*'",
",",
"'write'",
":",
"write",
"or",
"'.*'",
",",
"'read'",
":",
"read",
"or",
"'.*'",
",",
"}",
"self",
".",
"_api_put",
"(",
"'/api/permissions/{0}/{1}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
",",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
")",
",",
"data",
"=",
"data",
")"
] |
Create a user permission
:param name: The user's name
:type name: str
:param vhost: The vhost to assign the permission to
:type vhost: str
:param configure: A regex for the user permission. Default is ``.*``
:type configure: str
:param write: A regex for the user permission. Default is ``.*``
:type write: str
:param read: A regex for the user permission. Default is ``.*``
:type read: str
|
[
"Create",
"a",
"user",
"permission",
":",
"param",
"name",
":",
"The",
"user",
"s",
"name",
":",
"type",
"name",
":",
"str",
":",
"param",
"vhost",
":",
"The",
"vhost",
"to",
"assign",
"the",
"permission",
"to",
":",
"type",
"vhost",
":",
"str"
] |
python
|
train
|
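The interesting detail in `create_user_permission` above is that both path segments are percent-encoded with `quote_plus` before formatting, so names containing `/` or `@` stay safe in the URL. A quick standard-library illustration:

```python
# The default RabbitMQ vhost '/' must be encoded as '%2F' in the path.
import urllib.parse

vhost, name = '/', 'alice@example.com'
path = '/api/permissions/{0}/{1}'.format(
    urllib.parse.quote_plus(vhost),
    urllib.parse.quote_plus(name),
)
print(path)  # /api/permissions/%2F/alice%40example.com
```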
juju/charm-helpers
|
charmhelpers/core/host.py
|
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/host.py#L447-L486
|
def chage(username, lastday=None, expiredate=None, inactive=None,
mindays=None, maxdays=None, root=None, warndays=None):
"""Change user password expiry information
:param str username: User to update
:param str lastday: Set when password was changed in YYYY-MM-DD format
:param str expiredate: Set when user's account will no longer be
accessible in YYYY-MM-DD format.
-1 will remove an account expiration date.
:param str inactive: Set the number of days of inactivity after a password
has expired before the account is locked.
-1 will remove an account's inactivity.
:param str mindays: Set the minimum number of days between password
changes to MIN_DAYS.
0 indicates the password can be changed anytime.
:param str maxdays: Set the maximum number of days during which a
password is valid.
-1 as MAX_DAYS will remove checking maxdays
:param str root: Apply changes in the CHROOT_DIR directory
:param str warndays: Set the number of days of warning before a password
change is required
:raises subprocess.CalledProcessError: if call to chage fails
"""
cmd = ['chage']
if root:
cmd.extend(['--root', root])
if lastday:
cmd.extend(['--lastday', lastday])
if expiredate:
cmd.extend(['--expiredate', expiredate])
if inactive:
cmd.extend(['--inactive', inactive])
if mindays:
cmd.extend(['--mindays', mindays])
if maxdays:
cmd.extend(['--maxdays', maxdays])
if warndays:
cmd.extend(['--warndays', warndays])
cmd.append(username)
subprocess.check_call(cmd)
|
[
"def",
"chage",
"(",
"username",
",",
"lastday",
"=",
"None",
",",
"expiredate",
"=",
"None",
",",
"inactive",
"=",
"None",
",",
"mindays",
"=",
"None",
",",
"maxdays",
"=",
"None",
",",
"root",
"=",
"None",
",",
"warndays",
"=",
"None",
")",
":",
"cmd",
"=",
"[",
"'chage'",
"]",
"if",
"root",
":",
"cmd",
".",
"extend",
"(",
"[",
"'--root'",
",",
"root",
"]",
")",
"if",
"lastday",
":",
"cmd",
".",
"extend",
"(",
"[",
"'--lastday'",
",",
"lastday",
"]",
")",
"if",
"expiredate",
":",
"cmd",
".",
"extend",
"(",
"[",
"'--expiredate'",
",",
"expiredate",
"]",
")",
"if",
"inactive",
":",
"cmd",
".",
"extend",
"(",
"[",
"'--inactive'",
",",
"inactive",
"]",
")",
"if",
"mindays",
":",
"cmd",
".",
"extend",
"(",
"[",
"'--mindays'",
",",
"mindays",
"]",
")",
"if",
"maxdays",
":",
"cmd",
".",
"extend",
"(",
"[",
"'--maxdays'",
",",
"maxdays",
"]",
")",
"if",
"warndays",
":",
"cmd",
".",
"extend",
"(",
"[",
"'--warndays'",
",",
"warndays",
"]",
")",
"cmd",
".",
"append",
"(",
"username",
")",
"subprocess",
".",
"check_call",
"(",
"cmd",
")"
] |
Change user password expiry information
:param str username: User to update
:param str lastday: Set when password was changed in YYYY-MM-DD format
:param str expiredate: Set when user's account will no longer be
accessible in YYYY-MM-DD format.
-1 will remove an account expiration date.
:param str inactive: Set the number of days of inactivity after a password
has expired before the account is locked.
-1 will remove an account's inactivity.
:param str mindays: Set the minimum number of days between password
changes to MIN_DAYS.
0 indicates the password can be changed anytime.
:param str maxdays: Set the maximum number of days during which a
password is valid.
-1 as MAX_DAYS will remove checking maxdays
:param str root: Apply changes in the CHROOT_DIR directory
:param str warndays: Set the number of days of warning before a password
change is required
:raises subprocess.CalledProcessError: if call to chage fails
|
[
"Change",
"user",
"password",
"expiry",
"information"
] |
python
|
train
|
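`chage` above assembles its command incrementally: each keyword contributes a flag pair only when set, and the positional username goes last. A trimmed sketch of that assembly that prints the list instead of calling `subprocess.check_call`:

```python
# Only the options that were actually provided end up in the argv list.
def build_chage_cmd(username, lastday=None, expiredate=None, root=None):
    cmd = ['chage']
    if root:
        cmd.extend(['--root', root])
    if lastday:
        cmd.extend(['--lastday', lastday])
    if expiredate:
        cmd.extend(['--expiredate', expiredate])
    cmd.append(username)
    return cmd

print(build_chage_cmd('alice', expiredate='2025-12-31'))
# ['chage', '--expiredate', '2025-12-31', 'alice']
```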
relekang/python-semantic-release
|
semantic_release/vcs_helpers.py
|
https://github.com/relekang/python-semantic-release/blob/76123f410180599a19e7c48da413880185bbea20/semantic_release/vcs_helpers.py#L130-L155
|
def push_new_version(gh_token: str = None, owner: str = None, name: str = None):
"""
Runs git push and git push --tags.
:param gh_token: Github token used to push.
:param owner: Organisation or user that owns the repository.
:param name: Name of repository.
:raises GitError: if GitCommandError is raised
"""
check_repo()
server = 'origin'
if gh_token:
server = 'https://{token}@{repo}'.format(
token=gh_token,
repo='github.com/{owner}/{name}.git'.format(owner=owner, name=name)
)
try:
repo.git.push(server, 'master')
repo.git.push('--tags', server, 'master')
except GitCommandError as error:
message = str(error)
if gh_token:
message = message.replace(gh_token, '[GH_TOKEN]')
raise GitError(message)
|
[
"def",
"push_new_version",
"(",
"gh_token",
":",
"str",
"=",
"None",
",",
"owner",
":",
"str",
"=",
"None",
",",
"name",
":",
"str",
"=",
"None",
")",
":",
"check_repo",
"(",
")",
"server",
"=",
"'origin'",
"if",
"gh_token",
":",
"server",
"=",
"'https://{token}@{repo}'",
".",
"format",
"(",
"token",
"=",
"gh_token",
",",
"repo",
"=",
"'github.com/{owner}/{name}.git'",
".",
"format",
"(",
"owner",
"=",
"owner",
",",
"name",
"=",
"name",
")",
")",
"try",
":",
"repo",
".",
"git",
".",
"push",
"(",
"server",
",",
"'master'",
")",
"repo",
".",
"git",
".",
"push",
"(",
"'--tags'",
",",
"server",
",",
"'master'",
")",
"except",
"GitCommandError",
"as",
"error",
":",
"message",
"=",
"str",
"(",
"error",
")",
"if",
"gh_token",
":",
"message",
"=",
"message",
".",
"replace",
"(",
"gh_token",
",",
"'[GH_TOKEN]'",
")",
"raise",
"GitError",
"(",
"message",
")"
] |
Runs git push and git push --tags.
:param gh_token: Github token used to push.
:param owner: Organisation or user that owns the repository.
:param name: Name of repository.
:raises GitError: if GitCommandError is raised
|
[
"Runs",
"git",
"push",
"and",
"git",
"push",
"--",
"tags",
"."
] |
python
|
train
|
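A detail worth copying from `push_new_version` above: before re-raising, the token is scrubbed from the error text so it cannot leak into CI logs. The pattern in isolation (token and message are dummies):

```python
# Redact the secret anywhere it appears in the error message.
gh_token = 'ghp_exampletoken123'
message = 'fatal: could not push to https://ghp_exampletoken123@github.com/o/r.git'
if gh_token:
    message = message.replace(gh_token, '[GH_TOKEN]')
print(message)
# fatal: could not push to https://[GH_TOKEN]@github.com/o/r.git
```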
palantir/python-language-server
|
pyls/_utils.py
|
https://github.com/palantir/python-language-server/blob/96e08d85635382d17024c352306c4759f124195d/pyls/_utils.py#L78-L96
|
def merge_dicts(dict_a, dict_b):
"""Recursively merge dictionary b into dictionary a.
If override_nones is True, then
"""
def _merge_dicts_(a, b):
for key in set(a.keys()).union(b.keys()):
if key in a and key in b:
if isinstance(a[key], dict) and isinstance(b[key], dict):
yield (key, dict(_merge_dicts_(a[key], b[key])))
elif b[key] is not None:
yield (key, b[key])
else:
yield (key, a[key])
elif key in a:
yield (key, a[key])
elif b[key] is not None:
yield (key, b[key])
return dict(_merge_dicts_(dict_a, dict_b))
|
[
"def",
"merge_dicts",
"(",
"dict_a",
",",
"dict_b",
")",
":",
"def",
"_merge_dicts_",
"(",
"a",
",",
"b",
")",
":",
"for",
"key",
"in",
"set",
"(",
"a",
".",
"keys",
"(",
")",
")",
".",
"union",
"(",
"b",
".",
"keys",
"(",
")",
")",
":",
"if",
"key",
"in",
"a",
"and",
"key",
"in",
"b",
":",
"if",
"isinstance",
"(",
"a",
"[",
"key",
"]",
",",
"dict",
")",
"and",
"isinstance",
"(",
"b",
"[",
"key",
"]",
",",
"dict",
")",
":",
"yield",
"(",
"key",
",",
"dict",
"(",
"_merge_dicts_",
"(",
"a",
"[",
"key",
"]",
",",
"b",
"[",
"key",
"]",
")",
")",
")",
"elif",
"b",
"[",
"key",
"]",
"is",
"not",
"None",
":",
"yield",
"(",
"key",
",",
"b",
"[",
"key",
"]",
")",
"else",
":",
"yield",
"(",
"key",
",",
"a",
"[",
"key",
"]",
")",
"elif",
"key",
"in",
"a",
":",
"yield",
"(",
"key",
",",
"a",
"[",
"key",
"]",
")",
"elif",
"b",
"[",
"key",
"]",
"is",
"not",
"None",
":",
"yield",
"(",
"key",
",",
"b",
"[",
"key",
"]",
")",
"return",
"dict",
"(",
"_merge_dicts_",
"(",
"dict_a",
",",
"dict_b",
")",
")"
] |
Recursively merge dictionary b into dictionary a.
If override_nones is True, then
|
[
"Recursively",
"merge",
"dictionary",
"b",
"into",
"dictionary",
"a",
"."
] |
python
|
train
|
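A usage sketch for `merge_dicts` above; the function is restated verbatim so the snippet runs standalone. Note how nested dicts merge key-by-key and how a `None` in `dict_b` falls back to the value from `dict_a`:

```python
def merge_dicts(dict_a, dict_b):
    def _merge_dicts_(a, b):
        for key in set(a.keys()).union(b.keys()):
            if key in a and key in b:
                if isinstance(a[key], dict) and isinstance(b[key], dict):
                    yield (key, dict(_merge_dicts_(a[key], b[key])))
                elif b[key] is not None:
                    yield (key, b[key])
                else:
                    yield (key, a[key])
            elif key in a:
                yield (key, a[key])
            elif b[key] is not None:
                yield (key, b[key])
    return dict(_merge_dicts_(dict_a, dict_b))

defaults = {'plugins': {'pyflakes': {'enabled': True}}, 'timeout': 5}
user = {'plugins': {'pyflakes': {'enabled': False}}, 'timeout': None}
print(merge_dicts(defaults, user))
# {'plugins': {'pyflakes': {'enabled': False}}, 'timeout': 5}
```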
gccxml/pygccxml
|
pygccxml/parser/source_reader.py
|
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/source_reader.py#L275-L295
|
def create_xml_file_from_string(self, content, destination=None):
"""
Creates XML file from text.
:param content: C++ source code
:type content: str
:param destination: file name for xml file
:type destination: str
:rtype: returns file name of xml file
"""
header_file = utils.create_temp_file_name(suffix='.h')
try:
with open(header_file, "w+") as header:
header.write(content)
xml_file = self.create_xml_file(header_file, destination)
finally:
utils.remove_file_no_raise(header_file, self.__config)
return xml_file
|
[
"def",
"create_xml_file_from_string",
"(",
"self",
",",
"content",
",",
"destination",
"=",
"None",
")",
":",
"header_file",
"=",
"utils",
".",
"create_temp_file_name",
"(",
"suffix",
"=",
"'.h'",
")",
"try",
":",
"with",
"open",
"(",
"header_file",
",",
"\"w+\"",
")",
"as",
"header",
":",
"header",
".",
"write",
"(",
"content",
")",
"xml_file",
"=",
"self",
".",
"create_xml_file",
"(",
"header_file",
",",
"destination",
")",
"finally",
":",
"utils",
".",
"remove_file_no_raise",
"(",
"header_file",
",",
"self",
".",
"__config",
")",
"return",
"xml_file"
] |
Creates XML file from text.
:param content: C++ source code
:type content: str
:param destination: file name for xml file
:type destination: str
:rtype: returns file name of xml file
|
[
"Creates",
"XML",
"file",
"from",
"text",
"."
] |
python
|
train
|
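`create_xml_file_from_string` above follows the classic temp-file lifecycle: write, hand off, and clean up in `finally` even if processing fails. The same shape with only the standard library (the processing step is a stand-in):

```python
import os
import tempfile

def process_from_string(content):
    # Create a temporary header, write the content, process it, and
    # guarantee removal whether or not processing raises.
    fd, header_file = tempfile.mkstemp(suffix='.h')
    os.close(fd)
    try:
        with open(header_file, 'w+') as header:
            header.write(content)
        result = os.path.getsize(header_file)  # stand-in for create_xml_file
    finally:
        os.remove(header_file)
    return result

print(process_from_string('struct point { int x; int y; };'))
```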
secdev/scapy
|
scapy/layers/tls/keyexchange.py
|
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/keyexchange.py#L73-L86
|
def phantom_decorate(f, get_or_add):
"""
Decorator for version-dependent fields.
If get_or_add is True (means get), we return s, self.phantom_value.
If it is False (means add), we return s.
"""
def wrapper(*args):
self, pkt, s = args[:3]
if phantom_mode(pkt):
if get_or_add:
return s, self.phantom_value
return s
return f(*args)
return wrapper
|
[
"def",
"phantom_decorate",
"(",
"f",
",",
"get_or_add",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
")",
":",
"self",
",",
"pkt",
",",
"s",
"=",
"args",
"[",
":",
"3",
"]",
"if",
"phantom_mode",
"(",
"pkt",
")",
":",
"if",
"get_or_add",
":",
"return",
"s",
",",
"self",
".",
"phantom_value",
"return",
"s",
"return",
"f",
"(",
"*",
"args",
")",
"return",
"wrapper"
] |
Decorator for version-dependent fields.
If get_or_add is True (means get), we return s, self.phantom_value.
If it is False (means add), we return s.
|
[
"Decorator",
"for",
"version",
"-",
"dependent",
"fields",
".",
"If",
"get_or_add",
"is",
"True",
"(",
"means",
"get",
")",
"we",
"return",
"s",
"self",
".",
"phantom_value",
".",
"If",
"it",
"is",
"False",
"(",
"means",
"add",
")",
"we",
"return",
"s",
"."
] |
python
|
train
|
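`phantom_decorate` above is a conditional-passthrough decorator: when a predicate on the arguments holds, the wrapper short-circuits instead of calling the wrapped function. A generic sketch with an invented predicate:

```python
# When predicate(*args) is true, return the fallback; otherwise defer
# to the wrapped function unchanged.
def passthrough_if(predicate, fallback):
    def decorate(f):
        def wrapper(*args):
            if predicate(*args):
                return fallback
            return f(*args)
        return wrapper
    return decorate

@passthrough_if(lambda x: x < 0, fallback=0)
def cube(x):
    return x ** 3

print(cube(3))   # 27
print(cube(-2))  # 0, short-circuited by the predicate
```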
kgiusti/pyngus
|
pyngus/link.py
|
https://github.com/kgiusti/pyngus/blob/5392392046989f1bb84ba938c30e4d48311075f1/pyngus/link.py#L171-L178
|
def target_address(self):
"""Return the authorative target of the link."""
# If link is a receiver, target is determined by the local
# value, else use the remote.
if self._pn_link.is_receiver:
return self._pn_link.target.address
else:
return self._pn_link.remote_target.address
|
[
"def",
"target_address",
"(",
"self",
")",
":",
"# If link is a receiver, target is determined by the local",
"# value, else use the remote.",
"if",
"self",
".",
"_pn_link",
".",
"is_receiver",
":",
"return",
"self",
".",
"_pn_link",
".",
"target",
".",
"address",
"else",
":",
"return",
"self",
".",
"_pn_link",
".",
"remote_target",
".",
"address"
] |
Return the authoritative target of the link.
|
[
"Return",
"the",
"authorative",
"target",
"of",
"the",
"link",
"."
] |
python
|
test
|
assemblerflow/flowcraft
|
flowcraft/templates/process_newick.py
|
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/templates/process_newick.py#L53-L85
|
def main(newick):
"""Main executor of the process_newick template.
Parameters
----------
newick : str
path to the newick file.
"""
logger.info("Starting newick file processing")
print(newick)
tree = dendropy.Tree.get(file=open(newick, 'r'), schema="newick")
tree.reroot_at_midpoint()
to_write = tree.as_string("newick").strip().replace("[&R] ", '').replace(' ', '_').replace("'", "")
with open(".report.json", "w") as json_report:
json_dic = {
"treeData": [{
"trees": [
to_write
]
}],
}
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass")
|
[
"def",
"main",
"(",
"newick",
")",
":",
"logger",
".",
"info",
"(",
"\"Starting newick file processing\"",
")",
"print",
"(",
"newick",
")",
"tree",
"=",
"dendropy",
".",
"Tree",
".",
"get",
"(",
"file",
"=",
"open",
"(",
"newick",
",",
"'r'",
")",
",",
"schema",
"=",
"\"newick\"",
")",
"tree",
".",
"reroot_at_midpoint",
"(",
")",
"to_write",
"=",
"tree",
".",
"as_string",
"(",
"\"newick\"",
")",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"\"[&R] \"",
",",
"''",
")",
".",
"replace",
"(",
"' '",
",",
"'_'",
")",
".",
"replace",
"(",
"\"'\"",
",",
"\"\"",
")",
"with",
"open",
"(",
"\".report.json\"",
",",
"\"w\"",
")",
"as",
"json_report",
":",
"json_dic",
"=",
"{",
"\"treeData\"",
":",
"[",
"{",
"\"trees\"",
":",
"[",
"to_write",
"]",
"}",
"]",
",",
"}",
"json_report",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"json_dic",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
")",
")",
"with",
"open",
"(",
"\".status\"",
",",
"\"w\"",
")",
"as",
"status_fh",
":",
"status_fh",
".",
"write",
"(",
"\"pass\"",
")"
] |
Main executor of the process_newick template.
Parameters
----------
newick : str
path to the newick file.
|
[
"Main",
"executor",
"of",
"the",
"process_newick",
"template",
"."
] |
python
|
test
|
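One small but deliberate choice in `main` above is `separators=(",", ":")`, which strips the whitespace `json.dumps` inserts by default and keeps the `.report.json` payload compact:

```python
import json

json_dic = {"treeData": [{"trees": ["(A:0.1,B:0.2);"]}]}
print(json.dumps(json_dic))                         # default: spaces after , and :
print(json.dumps(json_dic, separators=(",", ":")))  # compact form, no spaces
```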
DataBiosphere/dsub
|
dsub/providers/google_v2.py
|
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/providers/google_v2.py#L405-L416
|
def _map(self, event):
"""Extract elements from an operation event and map to a named event."""
description = event.get('description', '')
start_time = google_base.parse_rfc3339_utc_string(
event.get('timestamp', ''))
for name, regex in _EVENT_REGEX_MAP.items():
match = regex.match(description)
if match:
return {'name': name, 'start-time': start_time}, match
return {'name': description, 'start-time': start_time}, None
|
[
"def",
"_map",
"(",
"self",
",",
"event",
")",
":",
"description",
"=",
"event",
".",
"get",
"(",
"'description'",
",",
"''",
")",
"start_time",
"=",
"google_base",
".",
"parse_rfc3339_utc_string",
"(",
"event",
".",
"get",
"(",
"'timestamp'",
",",
"''",
")",
")",
"for",
"name",
",",
"regex",
"in",
"_EVENT_REGEX_MAP",
".",
"items",
"(",
")",
":",
"match",
"=",
"regex",
".",
"match",
"(",
"description",
")",
"if",
"match",
":",
"return",
"{",
"'name'",
":",
"name",
",",
"'start-time'",
":",
"start_time",
"}",
",",
"match",
"return",
"{",
"'name'",
":",
"description",
",",
"'start-time'",
":",
"start_time",
"}",
",",
"None"
] |
Extract elements from an operation event and map to a named event.
|
[
"Extract",
"elements",
"from",
"an",
"operation",
"event",
"and",
"map",
"to",
"a",
"named",
"event",
"."
] |
python
|
valid
|
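`_map` above is a regex-dispatch table: each named pattern is tried against a free-text description, with the raw description as the fallback. The same shape with invented event names and patterns:

```python
import re

_EVENT_REGEX_MAP = {
    'pulling-image': re.compile(r'^Started pulling "(.+)"$'),
    'localizing-files': re.compile(r'^Started copying files'),
}

def map_event(description):
    # Return the first named pattern that matches, else the raw text.
    for name, regex in _EVENT_REGEX_MAP.items():
        match = regex.match(description)
        if match:
            return name, match
    return description, None

print(map_event('Started pulling "python:3.11"')[0])  # pulling-image
print(map_event('Worker released')[0])                # Worker released
```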
angr/angr
|
angr/analyses/cfg/cfg_base.py
|
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_base.py#L367-L401
|
def _to_snippet(self, cfg_node=None, addr=None, size=None, thumb=False, jumpkind=None, base_state=None):
"""
Convert a CFGNode instance to a CodeNode object.
:param angr.analyses.CFGNode cfg_node: The CFGNode instance.
:param int addr: Address of the node. Only used when `cfg_node` is None.
:param bool thumb: Whether this is in THUMB mode or not. Only used for ARM code and when `cfg_node` is None.
:param str or None jumpkind: Jumpkind of this node.
:param SimState or None base_state: The state where BlockNode should be created from.
:return: A converted CodeNode instance.
:rtype: CodeNode
"""
if cfg_node is not None:
addr = cfg_node.addr
size = cfg_node.size
thumb = cfg_node.thumb
else:
addr = addr
size = size
thumb = thumb
if addr is None:
raise ValueError('_to_snippet(): Either cfg_node or addr must be provided.')
if self.project.is_hooked(addr) and jumpkind != 'Ijk_NoHook':
hooker = self.project._sim_procedures[addr]
size = hooker.kwargs.get('length', 0)
return HookNode(addr, size, type(hooker))
if cfg_node is not None:
return BlockNode(addr, size, thumb=thumb, bytestr=cfg_node.byte_string) # pylint: disable=no-member
else:
return self.project.factory.snippet(addr, size=size, jumpkind=jumpkind, thumb=thumb,
backup_state=base_state)
|
[
"def",
"_to_snippet",
"(",
"self",
",",
"cfg_node",
"=",
"None",
",",
"addr",
"=",
"None",
",",
"size",
"=",
"None",
",",
"thumb",
"=",
"False",
",",
"jumpkind",
"=",
"None",
",",
"base_state",
"=",
"None",
")",
":",
"if",
"cfg_node",
"is",
"not",
"None",
":",
"addr",
"=",
"cfg_node",
".",
"addr",
"size",
"=",
"cfg_node",
".",
"size",
"thumb",
"=",
"cfg_node",
".",
"thumb",
"else",
":",
"addr",
"=",
"addr",
"size",
"=",
"size",
"thumb",
"=",
"thumb",
"if",
"addr",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'_to_snippet(): Either cfg_node or addr must be provided.'",
")",
"if",
"self",
".",
"project",
".",
"is_hooked",
"(",
"addr",
")",
"and",
"jumpkind",
"!=",
"'Ijk_NoHook'",
":",
"hooker",
"=",
"self",
".",
"project",
".",
"_sim_procedures",
"[",
"addr",
"]",
"size",
"=",
"hooker",
".",
"kwargs",
".",
"get",
"(",
"'length'",
",",
"0",
")",
"return",
"HookNode",
"(",
"addr",
",",
"size",
",",
"type",
"(",
"hooker",
")",
")",
"if",
"cfg_node",
"is",
"not",
"None",
":",
"return",
"BlockNode",
"(",
"addr",
",",
"size",
",",
"thumb",
"=",
"thumb",
",",
"bytestr",
"=",
"cfg_node",
".",
"byte_string",
")",
"# pylint: disable=no-member",
"else",
":",
"return",
"self",
".",
"project",
".",
"factory",
".",
"snippet",
"(",
"addr",
",",
"size",
"=",
"size",
",",
"jumpkind",
"=",
"jumpkind",
",",
"thumb",
"=",
"thumb",
",",
"backup_state",
"=",
"base_state",
")"
] |
Convert a CFGNode instance to a CodeNode object.
:param angr.analyses.CFGNode cfg_node: The CFGNode instance.
:param int addr: Address of the node. Only used when `cfg_node` is None.
:param bool thumb: Whether this is in THUMB mode or not. Only used for ARM code and when `cfg_node` is None.
:param str or None jumpkind: Jumpkind of this node.
:param SimState or None base_state: The state where BlockNode should be created from.
:return: A converted CodeNode instance.
:rtype: CodeNode
|
[
"Convert",
"a",
"CFGNode",
"instance",
"to",
"a",
"CodeNode",
"object",
"."
] |
python
|
train
|
urbn/Caesium
|
caesium/document.py
|
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/document.py#L640-L663
|
def patch(self, predicate_value, attrs, predicate_attribute="_id"):
"""Update an existing document via a $set query, this will apply only these attributes.
:param predicate_value: The value of the predicate
:param dict attrs: The dictionary to apply to this object
:param str predicate_attribute: The attribute to query for to find the object to set this data on
:returns: JSON Mongo client response including the "n" key to show number of objects affected
"""
if predicate_attribute=="_id" and not isinstance(predicate_value, ObjectId):
predicate_value = ObjectId(predicate_value)
predicate = {predicate_attribute: predicate_value}
dct = self._dictionary_to_cursor(attrs)
if dct.get("_id"):
del dct["_id"]
set = { "$set": dct }
mongo_response = yield self.collection.update(predicate, set, False)
raise Return(self._obj_cursor_to_dictionary(mongo_response))
|
[
"def",
"patch",
"(",
"self",
",",
"predicate_value",
",",
"attrs",
",",
"predicate_attribute",
"=",
"\"_id\"",
")",
":",
"if",
"predicate_attribute",
"==",
"\"_id\"",
"and",
"not",
"isinstance",
"(",
"predicate_value",
",",
"ObjectId",
")",
":",
"predicate_value",
"=",
"ObjectId",
"(",
"predicate_value",
")",
"predicate",
"=",
"{",
"predicate_attribute",
":",
"predicate_value",
"}",
"dct",
"=",
"self",
".",
"_dictionary_to_cursor",
"(",
"attrs",
")",
"if",
"dct",
".",
"get",
"(",
"\"_id\"",
")",
":",
"del",
"dct",
"[",
"\"_id\"",
"]",
"set",
"=",
"{",
"\"$set\"",
":",
"dct",
"}",
"mongo_response",
"=",
"yield",
"self",
".",
"collection",
".",
"update",
"(",
"predicate",
",",
"set",
",",
"False",
")",
"raise",
"Return",
"(",
"self",
".",
"_obj_cursor_to_dictionary",
"(",
"mongo_response",
")",
")"
] |
Update an existing document via a $set query, this will apply only these attributes.
:param predicate_value: The value of the predicate
:param dict attrs: The dictionary to apply to this object
:param str predicate_attribute: The attribute to query for to find the object to set this data on
:returns: JSON Mongo client response including the "n" key to show number of objects affected
|
[
"Update",
"an",
"existing",
"document",
"via",
"a",
"$set",
"query",
"this",
"will",
"apply",
"only",
"these",
"attributes",
"."
] |
python
|
train
|
chrisrink10/basilisp
|
src/basilisp/lang/list.py
|
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/list.py#L86-L90
|
def list(members, meta=None) -> List: # pylint:disable=redefined-builtin
"""Creates a new list."""
return List( # pylint: disable=abstract-class-instantiated
plist(iterable=members), meta=meta
)
|
[
"def",
"list",
"(",
"members",
",",
"meta",
"=",
"None",
")",
"->",
"List",
":",
"# pylint:disable=redefined-builtin",
"return",
"List",
"(",
"# pylint: disable=abstract-class-instantiated",
"plist",
"(",
"iterable",
"=",
"members",
")",
",",
"meta",
"=",
"meta",
")"
] |
Creates a new list.
|
[
"Creates",
"a",
"new",
"list",
"."
] |
python
|
test
|
quantopian/pyfolio
|
pyfolio/plotting.py
|
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/plotting.py#L1025-L1061
|
def plot_gross_leverage(returns, positions, ax=None, **kwargs):
"""
Plots gross leverage versus date.
Gross leverage is the sum of long and short exposure per share
divided by net asset value.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
"""
if ax is None:
ax = plt.gca()
gl = timeseries.gross_lev(positions)
gl.plot(lw=0.5, color='limegreen', legend=False, ax=ax, **kwargs)
ax.axhline(gl.mean(), color='g', linestyle='--', lw=3)
ax.set_title('Gross leverage')
ax.set_ylabel('Gross leverage')
ax.set_xlabel('')
return ax
|
[
"def",
"plot_gross_leverage",
"(",
"returns",
",",
"positions",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"gl",
"=",
"timeseries",
".",
"gross_lev",
"(",
"positions",
")",
"gl",
".",
"plot",
"(",
"lw",
"=",
"0.5",
",",
"color",
"=",
"'limegreen'",
",",
"legend",
"=",
"False",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"kwargs",
")",
"ax",
".",
"axhline",
"(",
"gl",
".",
"mean",
"(",
")",
",",
"color",
"=",
"'g'",
",",
"linestyle",
"=",
"'--'",
",",
"lw",
"=",
"3",
")",
"ax",
".",
"set_title",
"(",
"'Gross leverage'",
")",
"ax",
".",
"set_ylabel",
"(",
"'Gross leverage'",
")",
"ax",
".",
"set_xlabel",
"(",
"''",
")",
"return",
"ax"
] |
Plots gross leverage versus date.
Gross leverage is the sum of long and short exposure per share
divided by net asset value.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
ax : matplotlib.Axes, optional
Axes upon which to plot.
**kwargs, optional
Passed to plotting function.
Returns
-------
ax : matplotlib.Axes
The axes that were plotted on.
|
[
"Plots",
"gross",
"leverage",
"versus",
"date",
"."
] |
python
|
valid
|
DAI-Lab/Copulas
|
copulas/multivariate/gaussian.py
|
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/multivariate/gaussian.py#L159-L172
|
def probability_density(self, X):
"""Compute probability density function for given copula family.
Args:
X: `numpy.ndarray` or `pandas.DataFrame`
Returns:
np.array: Probability density for the input values.
"""
self.check_fit()
# make cov positive semi-definite
covariance = self.covariance * np.identity(self.covariance.shape[0])
return stats.multivariate_normal.pdf(X, cov=covariance)
|
[
"def",
"probability_density",
"(",
"self",
",",
"X",
")",
":",
"self",
".",
"check_fit",
"(",
")",
"# make cov positive semi-definite",
"covariance",
"=",
"self",
".",
"covariance",
"*",
"np",
".",
"identity",
"(",
"self",
".",
"covariance",
".",
"shape",
"[",
"0",
"]",
")",
"return",
"stats",
".",
"multivariate_normal",
".",
"pdf",
"(",
"X",
",",
"cov",
"=",
"covariance",
")"
] |
Compute probability density function for given copula family.
Args:
X: `numpy.ndarray` or `pandas.DataFrame`
Returns:
np.array: Probability density for the input values.
|
[
"Compute",
"probability",
"density",
"function",
"for",
"given",
"copula",
"family",
"."
] |
python
|
train
|
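The one-liner `self.covariance * np.identity(...)` in `probability_density` above zeroes every off-diagonal entry, leaving a diagonal (hence positive semi-definite) covariance. Assuming numpy and scipy are available:

```python
import numpy as np
from scipy import stats

covariance = np.array([[1.0, 0.9],
                       [0.9, 1.0]])
# Elementwise multiplication by the identity keeps only the variances.
diagonal = covariance * np.identity(covariance.shape[0])
print(diagonal)  # [[1. 0.], [0. 1.]]
print(stats.multivariate_normal.pdf([0.0, 0.0], cov=diagonal))
```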
limodou/uliweb
|
uliweb/utils/generic.py
|
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/generic.py#L340-L378
|
def get_fields(model, fields, meta=None):
"""
According to model and fields to get fields list
Each field element is a two-element tuple, just like:
(name, field_obj)
"""
model = get_model(model)
if fields is not None:
f = fields
elif meta and hasattr(model, meta):
m = getattr(model, meta)
if hasattr(m, 'fields'):
f = m.fields
else:
f = model._fields_list
else:
f = model._fields_list
fields_list = []
for x in f:
field = {}
if isinstance(x, str): #so x is field_name
field['name'] = x
elif isinstance(x, tuple):
field['name'] = x[0]
field['field'] = x[1]
elif isinstance(x, dict):
field = x.copy()
else:
raise UliwebError('Field definition {!r} is not right, it should be just like (field_name, form_field_obj)'.format(x))
if 'prop' not in field:
if hasattr(model, field['name']):
field['prop'] = getattr(model, field['name'])
else:
field['prop'] = None
fields_list.append((field['name'], field))
return fields_list
|
[
"def",
"get_fields",
"(",
"model",
",",
"fields",
",",
"meta",
"=",
"None",
")",
":",
"model",
"=",
"get_model",
"(",
"model",
")",
"if",
"fields",
"is",
"not",
"None",
":",
"f",
"=",
"fields",
"elif",
"meta",
"and",
"hasattr",
"(",
"model",
",",
"meta",
")",
":",
"m",
"=",
"getattr",
"(",
"model",
",",
"meta",
")",
"if",
"hasattr",
"(",
"m",
",",
"'fields'",
")",
":",
"f",
"=",
"m",
".",
"fields",
"else",
":",
"f",
"=",
"model",
".",
"_fields_list",
"else",
":",
"f",
"=",
"model",
".",
"_fields_list",
"fields_list",
"=",
"[",
"]",
"for",
"x",
"in",
"f",
":",
"field",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"x",
",",
"str",
")",
":",
"#so x is field_name\r",
"field",
"[",
"'name'",
"]",
"=",
"x",
"elif",
"isinstance",
"(",
"x",
",",
"tuple",
")",
":",
"field",
"[",
"'name'",
"]",
"=",
"x",
"[",
"0",
"]",
"field",
"[",
"'field'",
"]",
"=",
"x",
"[",
"1",
"]",
"elif",
"isinstance",
"(",
"x",
",",
"dict",
")",
":",
"field",
"=",
"x",
".",
"copy",
"(",
")",
"else",
":",
"raise",
"UliwebError",
"(",
"'Field definition {!r} is not right, it should be just like (field_name, form_field_obj)'",
".",
"format",
"(",
"x",
")",
")",
"if",
"'prop'",
"not",
"in",
"field",
":",
"if",
"hasattr",
"(",
"model",
",",
"field",
"[",
"'name'",
"]",
")",
":",
"field",
"[",
"'prop'",
"]",
"=",
"getattr",
"(",
"model",
",",
"field",
"[",
"'name'",
"]",
")",
"else",
":",
"field",
"[",
"'prop'",
"]",
"=",
"None",
"fields_list",
".",
"append",
"(",
"(",
"field",
"[",
"'name'",
"]",
",",
"field",
")",
")",
"return",
"fields_list"
] |
According to model and fields to get fields list
Each field element is a two-element tuple, just like:
(name, field_obj)
|
[
"Acording",
"to",
"model",
"and",
"fields",
"to",
"get",
"fields",
"list",
"Each",
"field",
"element",
"is",
"a",
"two",
"elements",
"tuple",
"just",
"like",
":",
"(",
"name",
"field_obj",
")"
] |
python
|
train
|
Qiskit/qiskit-terra
|
qiskit/pulse/timeslots.py
|
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/pulse/timeslots.py#L179-L189
|
def ch_start_time(self, *channels: List[Channel]) -> int:
"""Return earliest start time in this collection.
Args:
*channels: Channels over which to obtain start_time.
"""
intervals = list(itertools.chain(*(self._table[chan] for chan in channels
if chan in self._table)))
if intervals:
return min((interval.begin for interval in intervals))
return 0
|
[
"def",
"ch_start_time",
"(",
"self",
",",
"*",
"channels",
":",
"List",
"[",
"Channel",
"]",
")",
"->",
"int",
":",
"intervals",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"(",
"self",
".",
"_table",
"[",
"chan",
"]",
"for",
"chan",
"in",
"channels",
"if",
"chan",
"in",
"self",
".",
"_table",
")",
")",
")",
"if",
"intervals",
":",
"return",
"min",
"(",
"(",
"interval",
".",
"begin",
"for",
"interval",
"in",
"intervals",
")",
")",
"return",
"0"
] |
Return earliest start time in this collection.
Args:
*channels: Channels over which to obtain start_time.
|
[
"Return",
"earliest",
"start",
"time",
"in",
"this",
"collection",
"."
] |
python
|
test
|
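`ch_start_time` above chains the interval lists of the requested channels and takes the smallest `begin`, defaulting to 0 when no channel is present. A self-contained sketch with a hand-rolled interval table:

```python
import itertools
from collections import namedtuple

Interval = namedtuple('Interval', ['begin', 'end'])
_table = {'d0': [Interval(4, 8)], 'd1': [Interval(2, 6), Interval(9, 12)]}

def ch_start_time(*channels):
    # Chain all interval lists for the known channels together.
    intervals = list(itertools.chain(*(_table[chan] for chan in channels
                                       if chan in _table)))
    if intervals:
        return min(interval.begin for interval in intervals)
    return 0

print(ch_start_time('d0', 'd1'))  # 2
print(ch_start_time('missing'))   # 0
```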
opencivicdata/pupa
|
pupa/importers/base.py
|
https://github.com/opencivicdata/pupa/blob/18e0ddc4344804987ee0f2227bf600375538dbd5/pupa/importers/base.py#L219-L243
|
def import_data(self, data_items):
""" import a bunch of dicts together """
# keep counts of all actions
record = {
'insert': 0, 'update': 0, 'noop': 0,
'start': utcnow(),
'records': {
'insert': [],
'update': [],
'noop': [],
}
}
for json_id, data in self._prepare_imports(data_items):
obj_id, what = self.import_item(data)
self.json_to_db_id[json_id] = obj_id
record['records'][what].append(obj_id)
record[what] += 1
# all objects are loaded, a perfect time to do inter-object resolution and other tasks
self.postimport()
record['end'] = utcnow()
return {self._type: record}
|
[
"def",
"import_data",
"(",
"self",
",",
"data_items",
")",
":",
"# keep counts of all actions",
"record",
"=",
"{",
"'insert'",
":",
"0",
",",
"'update'",
":",
"0",
",",
"'noop'",
":",
"0",
",",
"'start'",
":",
"utcnow",
"(",
")",
",",
"'records'",
":",
"{",
"'insert'",
":",
"[",
"]",
",",
"'update'",
":",
"[",
"]",
",",
"'noop'",
":",
"[",
"]",
",",
"}",
"}",
"for",
"json_id",
",",
"data",
"in",
"self",
".",
"_prepare_imports",
"(",
"data_items",
")",
":",
"obj_id",
",",
"what",
"=",
"self",
".",
"import_item",
"(",
"data",
")",
"self",
".",
"json_to_db_id",
"[",
"json_id",
"]",
"=",
"obj_id",
"record",
"[",
"'records'",
"]",
"[",
"what",
"]",
".",
"append",
"(",
"obj_id",
")",
"record",
"[",
"what",
"]",
"+=",
"1",
"# all objects are loaded, a perfect time to do inter-object resolution and other tasks",
"self",
".",
"postimport",
"(",
")",
"record",
"[",
"'end'",
"]",
"=",
"utcnow",
"(",
")",
"return",
"{",
"self",
".",
"_type",
":",
"record",
"}"
] |
import a bunch of dicts together
|
[
"import",
"a",
"bunch",
"of",
"dicts",
"together"
] |
python
|
train
|
i3visio/osrframework
|
osrframework/thirdparties/pipl_com/lib/thumbnail.py
|
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/thumbnail.py#L37-L100
|
def generate_thumbnail_url(image_url, height, width, favicon_domain=None,
zoom_face=True, api_key=None):
"""Take an image URL and generate a thumbnail URL for that image.
Args:
image_url -- unicode (or utf8 encoded str), URL of the image you want to
thumbnail.
height -- int, requested thumbnail height in pixels, maximum 500.
width -- int, requested thumbnail width in pixels, maximum 500.
favicon_domain -- unicode (or utf8 encoded str), optional, the domain of
the website where the image came from, the favicon will
be added to the corner of the thumbnail, recommended for
copyright reasons.
IMPORTANT: Don't assume that the domain of the website is
the domain from `image_url`, it's possible that
domain1.com hosts its images on domain2.com.
zoom_face -- bool, indicates whether you want the thumbnail to zoom on the
face in the image (in case there is a face) or not.
api_key -- str, a valid API key (use "samplekey" for experimenting).
Note that you can set a default API key
(osrframework.thirdparties.pipl_com.lib.thumbnail.default_api_key = '<your_key>') instead of
passing your key in each call.
ValueError is raised in case of illegal parameters.
Example (thumbnail URL from an image URL):
>>> from osrframework.thirdparties.pipl_com.lib.thumbnail import generate_thumbnail_url
>>> image_url = 'http://a7.twimg.com/a/ab76f.jpg'
>>> generate_thumbnail_url(image_url, 100, 100,
favicon_domain='twitter.com',
api_key='samplekey')
'http://api.pipl.com/thumbnail/v2/?key=samplekey&
favicon_domain=twitter.com&height=100&width=100&zoom_face=True&
image_url=http%3A%2F%2Fa7.twimg.com%2Fa%2Fab76f.jpg'
Example (thumbnail URL from a record that came in the response of our
Search API):
>>> from osrframework.thirdparties.pipl_com.lib.thumbnail import generate_thumbnail_url
>>> generate_thumbnail_url(record.images[0].url, 100, 100,
favicon_domain=record.source.domain,
api_key='samplekey')
'http://api.pipl.com/thumbnail/v2/?key=samplekey&
favicon_domain=twitter.com&height=100&width=100&zoom_face=True&
image_url=http%3A%2F%2Fa7.twimg.com%2Fa%2Fab76f.jpg'
"""
if not (api_key or default_api_key):
raise ValueError('A valid API key is required')
if not Image(url=image_url).is_valid_url:
raise ValueError('image_url is not a valid URL')
if not (0 < height <= MAX_PIXELS and 0 < width <= MAX_PIXELS):
raise ValueError('height/width must be between 0 and %d' % MAX_PIXELS)
query = {
'key': to_utf8(api_key or default_api_key),
'image_url': urllib.unquote(to_utf8(image_url)),
'height': height,
'width': width,
'favicon_domain': to_utf8(favicon_domain or ''),
'zoom_face': zoom_face,
}
return BASE_URL + urllib.urlencode(query)
|
[
"def",
"generate_thumbnail_url",
"(",
"image_url",
",",
"height",
",",
"width",
",",
"favicon_domain",
"=",
"None",
",",
"zoom_face",
"=",
"True",
",",
"api_key",
"=",
"None",
")",
":",
"if",
"not",
"(",
"api_key",
"or",
"default_api_key",
")",
":",
"raise",
"ValueError",
"(",
"'A valid API key is required'",
")",
"if",
"not",
"Image",
"(",
"url",
"=",
"image_url",
")",
".",
"is_valid_url",
":",
"raise",
"ValueError",
"(",
"'image_url is not a valid URL'",
")",
"if",
"not",
"(",
"0",
"<",
"height",
"<=",
"MAX_PIXELS",
"and",
"0",
"<",
"width",
"<=",
"MAX_PIXELS",
")",
":",
"raise",
"ValueError",
"(",
"'height/width must be between 0 and %d'",
"%",
"MAX_PIXELS",
")",
"query",
"=",
"{",
"'key'",
":",
"to_utf8",
"(",
"api_key",
"or",
"default_api_key",
")",
",",
"'image_url'",
":",
"urllib",
".",
"unquote",
"(",
"to_utf8",
"(",
"image_url",
")",
")",
",",
"'height'",
":",
"height",
",",
"'width'",
":",
"width",
",",
"'favicon_domain'",
":",
"to_utf8",
"(",
"favicon_domain",
"or",
"''",
")",
",",
"'zoom_face'",
":",
"zoom_face",
",",
"}",
"return",
"BASE_URL",
"+",
"urllib",
".",
"urlencode",
"(",
"query",
")"
] |
Take an image URL and generate a thumbnail URL for that image.
Args:
image_url -- unicode (or utf8 encoded str), URL of the image you want to
thumbnail.
height -- int, requested thumbnail height in pixels, maximum 500.
width -- int, requested thumbnail width in pixels, maximum 500.
favicon_domain -- unicode (or utf8 encoded str), optional, the domain of
the website where the image came from, the favicon will
be added to the corner of the thumbnail, recommended for
copyright reasons.
IMPORTANT: Don't assume that the domain of the website is
the domain from `image_url`, it's possible that
domain1.com hosts its images on domain2.com.
zoom_face -- bool, indicates whether you want the thumbnail to zoom on the
face in the image (in case there is a face) or not.
api_key -- str, a valid API key (use "samplekey" for experimenting).
Note that you can set a default API key
(osrframework.thirdparties.pipl_com.lib.thumbnail.default_api_key = '<your_key>') instead of
passing your key in each call.
ValueError is raised in case of illegal parameters.
Example (thumbnail URL from an image URL):
>>> from osrframework.thirdparties.pipl_com.lib.thumbnail import generate_thumbnail_url
>>> image_url = 'http://a7.twimg.com/a/ab76f.jpg'
>>> generate_thumbnail_url(image_url, 100, 100,
favicon_domain='twitter.com',
api_key='samplekey')
'http://api.pipl.com/thumbnail/v2/?key=samplekey&
favicon_domain=twitter.com&height=100&width=100&zoom_face=True&
image_url=http%3A%2F%2Fa7.twimg.com%2Fa%2Fab76f.jpg'
Example (thumbnail URL from a record that came in the response of our
Search API):
>>> from osrframework.thirdparties.pipl_com.lib.thumbnail import generate_thumbnail_url
>>> generate_thumbnail_url(record.images[0].url, 100, 100,
favicon_domain=record.source.domain,
api_key='samplekey')
'http://api.pipl.com/thumbnail/v2/?key=samplekey&
favicon_domain=twitter.com&height=100&width=100&zoom_face=True&
image_url=http%3A%2F%2Fa7.twimg.com%2Fa%2Fab76f.jpg'
|
[
"Take",
"an",
"image",
"URL",
"and",
"generate",
"a",
"thumbnail",
"URL",
"for",
"that",
"image",
".",
"Args",
":",
"image_url",
"--",
"unicode",
"(",
"or",
"utf8",
"encoded",
"str",
")",
"URL",
"of",
"the",
"image",
"you",
"want",
"to",
"thumbnail",
".",
"height",
"--",
"int",
"requested",
"thumbnail",
"height",
"in",
"pixels",
"maximum",
"500",
".",
"width",
"--",
"int",
"requested",
"thumbnail",
"width",
"in",
"pixels",
"maximum",
"500",
".",
"favicon_domain",
"--",
"unicode",
"(",
"or",
"utf8",
"encoded",
"str",
")",
"optional",
"the",
"domain",
"of",
"the",
"website",
"where",
"the",
"image",
"came",
"from",
"the",
"favicon",
"will",
"be",
"added",
"to",
"the",
"corner",
"of",
"the",
"thumbnail",
"recommended",
"for",
"copyright",
"reasones",
".",
"IMPORTANT",
":",
"Don",
"t",
"assume",
"that",
"the",
"domain",
"of",
"the",
"website",
"is",
"the",
"domain",
"from",
"image_url",
"it",
"s",
"possible",
"that",
"domain1",
".",
"com",
"hosts",
"its",
"images",
"on",
"domain2",
".",
"com",
".",
"zoom_face",
"--",
"bool",
"indicates",
"whether",
"you",
"want",
"the",
"thumbnail",
"to",
"zoom",
"on",
"the",
"face",
"in",
"the",
"image",
"(",
"in",
"case",
"there",
"is",
"a",
"face",
")",
"or",
"not",
".",
"api_key",
"--",
"str",
"a",
"valid",
"API",
"key",
"(",
"use",
"samplekey",
"for",
"experimenting",
")",
".",
"Note",
"that",
"you",
"can",
"set",
"a",
"default",
"API",
"key",
"(",
"osrframework",
".",
"thirdparties",
".",
"pipl_com",
".",
"lib",
".",
"thumbnail",
".",
"default_api_key",
"=",
"<your_key",
">",
")",
"instead",
"of",
"passing",
"your",
"key",
"in",
"each",
"call",
".",
"ValueError",
"is",
"raised",
"in",
"case",
"of",
"illegal",
"parameters",
".",
"Example",
"(",
"thumbnail",
"URL",
"from",
"an",
"image",
"URL",
")",
":",
">>>",
"from",
"osrframework",
".",
"thirdparties",
".",
"pipl_com",
".",
"lib",
".",
"thumbnail",
"import",
"generate_thumbnail_url",
">>>",
"image_url",
"=",
"http",
":",
"//",
"a7",
".",
"twimg",
".",
"com",
"/",
"a",
"/",
"ab76f",
".",
"jpg",
">>>",
"generate_thumbnail_url",
"(",
"image_url",
"100",
"100",
"favicon_domain",
"=",
"twitter",
".",
"com",
"api_key",
"=",
"samplekey",
")",
"http",
":",
"//",
"api",
".",
"pipl",
".",
"com",
"/",
"thumbnail",
"/",
"v2",
"/",
"?key",
"=",
"samplekey&",
"favicon_domain",
"=",
"twitter",
".",
"com&height",
"=",
"100&width",
"=",
"100&zoom_face",
"=",
"True&",
"image_url",
"=",
"http%3A%2F%2Fa7",
".",
"twimg",
".",
"com%2Fa%2Fab76f",
".",
"jpg",
"Example",
"(",
"thumbnail",
"URL",
"from",
"a",
"record",
"that",
"came",
"in",
"the",
"response",
"of",
"our",
"Search",
"API",
")",
":",
">>>",
"from",
"osrframework",
".",
"thirdparties",
".",
"pipl_com",
".",
"lib",
".",
"thumbnail",
"import",
"generate_thumbnail_url",
">>>",
"generate_thumbnail_url",
"(",
"record",
".",
"images",
"[",
"0",
"]",
".",
"url",
"100",
"100",
"favicon_domain",
"=",
"record",
".",
"source",
".",
"domain",
"api_key",
"=",
"samplekey",
")",
"http",
":",
"//",
"api",
".",
"pipl",
".",
"com",
"/",
"thumbnail",
"/",
"v2",
"/",
"?key",
"=",
"samplekey&",
"favicon_domain",
"=",
"twitter",
".",
"com&height",
"=",
"100&width",
"=",
"100&zoom_face",
"=",
"True&",
"image_url",
"=",
"http%3A%2F%2Fa7",
".",
"twimg",
".",
"com%2Fa%2Fab76f",
".",
"jpg"
] |
python
|
train
|
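The query assembly in `generate_thumbnail_url` above, rendered with Python 3's `urllib.parse` (the original uses the Python 2 module layout); `BASE_URL` and the sample values mirror the docstring example:

```python
from urllib.parse import urlencode

BASE_URL = 'http://api.pipl.com/thumbnail/v2/?'
query = {
    'key': 'samplekey',
    'image_url': 'http://a7.twimg.com/a/ab76f.jpg',
    'height': 100,
    'width': 100,
    'favicon_domain': 'twitter.com',
    'zoom_face': True,
}
# urlencode percent-escapes the image URL for us.
print(BASE_URL + urlencode(query))
```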
Shopify/shopify_python_api
|
shopify/session.py
|
https://github.com/Shopify/shopify_python_api/blob/88d3ba332fb2cd331f87517a16f2c2d4296cee90/shopify/session.py#L131-L138
|
def calculate_hmac(cls, params):
"""
Calculate the HMAC of the given parameters in line with Shopify's rules for OAuth authentication.
See http://docs.shopify.com/api/authentication/oauth#verification.
"""
encoded_params = cls.__encoded_params_for_signature(params)
# Generate the hex digest for the sorted parameters using the secret.
return hmac.new(cls.secret.encode(), encoded_params.encode(), sha256).hexdigest()
|
[
"def",
"calculate_hmac",
"(",
"cls",
",",
"params",
")",
":",
"encoded_params",
"=",
"cls",
".",
"__encoded_params_for_signature",
"(",
"params",
")",
"# Generate the hex digest for the sorted parameters using the secret.",
"return",
"hmac",
".",
"new",
"(",
"cls",
".",
"secret",
".",
"encode",
"(",
")",
",",
"encoded_params",
".",
"encode",
"(",
")",
",",
"sha256",
")",
".",
"hexdigest",
"(",
")"
] |
Calculate the HMAC of the given parameters in line with Shopify's rules for OAuth authentication.
See http://docs.shopify.com/api/authentication/oauth#verification.
|
[
"Calculate",
"the",
"HMAC",
"of",
"the",
"given",
"parameters",
"in",
"line",
"with",
"Shopify",
"s",
"rules",
"for",
"OAuth",
"authentication",
".",
"See",
"http",
":",
"//",
"docs",
".",
"shopify",
".",
"com",
"/",
"api",
"/",
"authentication",
"/",
"oauth#verification",
"."
] |
python
|
train
|
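`calculate_hmac` above boils down to HMAC-SHA256 over the encoded, sorted parameters. The core with only the standard library (secret and params are dummies); verification of an incoming signature should use a constant-time comparison:

```python
import hmac
from hashlib import sha256

secret = 'hush'
encoded_params = 'code=abc123&shop=example.myshopify.com&timestamp=1700000000'
digest = hmac.new(secret.encode(), encoded_params.encode(), sha256).hexdigest()
print(digest)
# Compare digests in constant time to avoid timing side channels:
print(hmac.compare_digest(digest, digest))  # True
```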
lreis2415/PyGeoC
|
pygeoc/hydro.py
|
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/hydro.py#L130-L135
|
def downstream_index(dir_value, i, j, alg='taudem'):
"""find downslope coordinate for D8 direction."""
assert alg.lower() in FlowModelConst.d8_deltas
delta = FlowModelConst.d8_deltas.get(alg.lower())
drow, dcol = delta[int(dir_value)]
return i + drow, j + dcol
|
[
"def",
"downstream_index",
"(",
"dir_value",
",",
"i",
",",
"j",
",",
"alg",
"=",
"'taudem'",
")",
":",
"assert",
"alg",
".",
"lower",
"(",
")",
"in",
"FlowModelConst",
".",
"d8_deltas",
"delta",
"=",
"FlowModelConst",
".",
"d8_deltas",
".",
"get",
"(",
"alg",
".",
"lower",
"(",
")",
")",
"drow",
",",
"dcol",
"=",
"delta",
"[",
"int",
"(",
"dir_value",
")",
"]",
"return",
"i",
"+",
"drow",
",",
"j",
"+",
"dcol"
] |
find downslope coordinate for D8 direction.
|
[
"find",
"downslope",
"coordinate",
"for",
"D8",
"direction",
"."
] |
python
|
train
|
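`downstream_index` above looks up a (row, column) offset for a D8 direction code. The real delta table lives in `FlowModelConst`; the TauDEM-style mapping written out below (1 = east, counter-clockwise through 8 = south-east) is an assumption for illustration:

```python
# Assumed TauDEM D8 offsets with row index increasing downward.
d8_delta_taudem = {
    1: (0, 1), 2: (-1, 1), 3: (-1, 0), 4: (-1, -1),
    5: (0, -1), 6: (1, -1), 7: (1, 0), 8: (1, 1),
}

def downstream_index(dir_value, i, j):
    drow, dcol = d8_delta_taudem[int(dir_value)]
    return i + drow, j + dcol

print(downstream_index(1, 5, 5))  # (5, 6): one cell east
print(downstream_index(3, 5, 5))  # (4, 5): one cell north
```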
nicolargo/glances
|
glances/plugins/glances_plugin.py
|
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_plugin.py#L625-L659
|
def manage_action(self,
stat_name,
trigger,
header,
action_key):
"""Manage the action for the current stat."""
# Is there a command line for the current trigger?
try:
command, repeat = self.get_limit_action(trigger, stat_name=stat_name)
except KeyError:
# Reset the trigger
self.actions.set(stat_name, trigger)
else:
# Define the action key for the stats dict
# If not defined, it is set to header
if action_key is None:
action_key = header
# A command line is available for the current alert
# 1) Build the {{mustache}} dictionary
if isinstance(self.get_stats_action(), list):
# If the stats are stored in a list of dicts (fs plugin for example)
# Return the dict for the current header
mustache_dict = {}
for item in self.get_stats_action():
if item[self.get_key()] == action_key:
mustache_dict = item
break
else:
# Use the stats dict
mustache_dict = self.get_stats_action()
# 2) Run the action
self.actions.run(
stat_name, trigger,
command, repeat, mustache_dict=mustache_dict)
|
[
"def",
"manage_action",
"(",
"self",
",",
"stat_name",
",",
"trigger",
",",
"header",
",",
"action_key",
")",
":",
"# Here is a command line for the current trigger ?",
"try",
":",
"command",
",",
"repeat",
"=",
"self",
".",
"get_limit_action",
"(",
"trigger",
",",
"stat_name",
"=",
"stat_name",
")",
"except",
"KeyError",
":",
"# Reset the trigger",
"self",
".",
"actions",
".",
"set",
"(",
"stat_name",
",",
"trigger",
")",
"else",
":",
"# Define the action key for the stats dict",
"# If not define, then it sets to header",
"if",
"action_key",
"is",
"None",
":",
"action_key",
"=",
"header",
"# A command line is available for the current alert",
"# 1) Build the {{mustache}} dictionnary",
"if",
"isinstance",
"(",
"self",
".",
"get_stats_action",
"(",
")",
",",
"list",
")",
":",
"# If the stats are stored in a list of dict (fs plugin for exemple)",
"# Return the dict for the current header",
"mustache_dict",
"=",
"{",
"}",
"for",
"item",
"in",
"self",
".",
"get_stats_action",
"(",
")",
":",
"if",
"item",
"[",
"self",
".",
"get_key",
"(",
")",
"]",
"==",
"action_key",
":",
"mustache_dict",
"=",
"item",
"break",
"else",
":",
"# Use the stats dict",
"mustache_dict",
"=",
"self",
".",
"get_stats_action",
"(",
")",
"# 2) Run the action",
"self",
".",
"actions",
".",
"run",
"(",
"stat_name",
",",
"trigger",
",",
"command",
",",
"repeat",
",",
"mustache_dict",
"=",
"mustache_dict",
")"
] |
Manage the action for the current stat.
|
[
"Manage",
"the",
"action",
"for",
"the",
"current",
"stat",
"."
] |
python
|
train
|
pyopenapi/pyswagger
|
pyswagger/scanner/v2_0/merge.py
|
https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/scanner/v2_0/merge.py#L18-L42
|
def _merge(obj, app, creator, parser):
""" resolve $ref, and inject/merge referenced object to self.
This operation should be carried in a cascade manner.
"""
result = creator(NullContext())
result.merge(obj, parser)
guard = CycleGuard()
guard.update(obj)
r = getattr(obj, '$ref')
while r and len(r) > 0:
ro = app.resolve(r, parser)
if ro.__class__ != obj.__class__:
raise TypeError('Referenced Type mismatch: {0}'.format(r))
try:
guard.update(ro)
except CycleDetectionError:
# avoid infinite loop,
# cycle detection has a dedicated scanner.
break
result.merge(ro, parser)
r = getattr(ro, '$ref')
return result
|
[
"def",
"_merge",
"(",
"obj",
",",
"app",
",",
"creator",
",",
"parser",
")",
":",
"result",
"=",
"creator",
"(",
"NullContext",
"(",
")",
")",
"result",
".",
"merge",
"(",
"obj",
",",
"parser",
")",
"guard",
"=",
"CycleGuard",
"(",
")",
"guard",
".",
"update",
"(",
"obj",
")",
"r",
"=",
"getattr",
"(",
"obj",
",",
"'$ref'",
")",
"while",
"r",
"and",
"len",
"(",
"r",
")",
">",
"0",
":",
"ro",
"=",
"app",
".",
"resolve",
"(",
"r",
",",
"parser",
")",
"if",
"ro",
".",
"__class__",
"!=",
"obj",
".",
"__class__",
":",
"raise",
"TypeError",
"(",
"'Referenced Type mismatch: {0}'",
".",
"format",
"(",
"r",
")",
")",
"try",
":",
"guard",
".",
"update",
"(",
"ro",
")",
"except",
"CycleDetectionError",
":",
"# avoid infinite loop,",
"# cycle detection has a dedicated scanner.",
"break",
"result",
".",
"merge",
"(",
"ro",
",",
"parser",
")",
"r",
"=",
"getattr",
"(",
"ro",
",",
"'$ref'",
")",
"return",
"result"
] |
resolve $ref, and inject/merge referenced object to self.
This operation should be carried in a cascade manner.
|
[
"resolve",
"$ref",
"and",
"inject",
"/",
"merge",
"referenced",
"object",
"to",
"self",
".",
"This",
"operation",
"should",
"be",
"carried",
"in",
"a",
"cascade",
"manner",
"."
] |
python
|
train
|
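The CycleGuard/CycleDetectionError pair is what keeps the $ref-following loop above finite. A hedged sketch of the idea, tracking visited objects by id; pyswagger's real implementation may differ:
class CycleDetectionError(Exception):
    pass

class CycleGuard:
    def __init__(self):
        self._seen = set()

    def update(self, obj):
        # raise on the second visit of the same object
        if id(obj) in self._seen:
            raise CycleDetectionError('cycle detected')
        self._seen.add(id(obj))

guard = CycleGuard()
a, b = object(), object()
guard.update(a)
guard.update(b)
try:
    guard.update(a)  # revisiting 'a' breaks the $ref-following loop
except CycleDetectionError:
    print('cycle detected, stop following $ref')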
galaxy-genome-annotation/python-apollo
|
apollo/users/__init__.py
|
https://github.com/galaxy-genome-annotation/python-apollo/blob/2bc9991302abe4402ec2885dcaac35915475b387/apollo/users/__init__.py#L169-L203
|
def create_user(self, email, first_name, last_name, password, role="user", metadata={}):
"""
Create a new user
:type email: str
:param email: User's email
:type first_name: str
:param first_name: User's first name
:type last_name: str
:param last_name: User's last name
:type password: str
:param password: User's password
:type role: str
:param role: User's default role, one of "admin" or "user"
:type metadata: dict
:param metadata: User metadata
:rtype: dict
:return: an empty dictionary
"""
data = {
'firstName': first_name,
'lastName': last_name,
'email': email,
'metadata': metadata,
'role': role.upper() if role else role,
'newPassword': password,
}
response = self.post('createUser', data)
return self._handle_empty(email, response)
|
[
"def",
"create_user",
"(",
"self",
",",
"email",
",",
"first_name",
",",
"last_name",
",",
"password",
",",
"role",
"=",
"\"user\"",
",",
"metadata",
"=",
"{",
"}",
")",
":",
"data",
"=",
"{",
"'firstName'",
":",
"first_name",
",",
"'lastName'",
":",
"last_name",
",",
"'email'",
":",
"email",
",",
"'metadata'",
":",
"metadata",
",",
"'role'",
":",
"role",
".",
"upper",
"(",
")",
"if",
"role",
"else",
"role",
",",
"'newPassword'",
":",
"password",
",",
"}",
"response",
"=",
"self",
".",
"post",
"(",
"'createUser'",
",",
"data",
")",
"return",
"self",
".",
"_handle_empty",
"(",
"email",
",",
"response",
")"
] |
Create a new user
:type email: str
:param email: User's email
:type first_name: str
:param first_name: User's first name
:type last_name: str
:param last_name: User's last name
:type password: str
:param password: User's password
:type role: str
:param role: User's default role, one of "admin" or "user"
:type metadata: dict
:param metadata: User metadata
:rtype: dict
:return: an empty dictionary
|
[
"Create",
"a",
"new",
"user"
] |
python
|
train
|
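One detail worth noting in the signature above: metadata={} is a mutable default, which Python evaluates once and shares across all calls. It is benign here because the dict is never mutated, but the conventional defensive rewrite looks like this (a standalone sketch, not python-apollo's actual code):
def create_user(email, first_name, last_name, password, role="user", metadata=None):
    if metadata is None:
        metadata = {}  # each call now gets a fresh dict
    metadata['created'] = True  # mutating is safe with the None-default idiom
    return {'email': email,
            'role': role.upper() if role else role,
            'metadata': metadata}

print(create_user('a@example.org', 'Ada', 'Lovelace', 's3cret'))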
reorx/torext
|
torext/handlers/base.py
|
https://github.com/reorx/torext/blob/84c4300ebc7fab0dbd11cf8b020bc7d4d1570171/torext/handlers/base.py#L291-L329
|
def finish(self):
"""Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice. May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if not hasattr(self, '_stream_queue') or not self._stream_queue:
raise RuntimeError("`_stream_queue` was not assigned, you should"
"call `write_stream_queue` to set.")
# === Replace `if not self._headers_written` === #
self.set_status(200)
self.set_header("Content-Type", "text/event-stream")
self.set_header("Cache-Control", "no-cache")
self.set_header("Access-Control-Allow-Origin", "*")
# ============================================== #
self.request.connection.set_close_callback(None)
# === Add before self.flush === #
# Reset buffer
self._write_buffer = []
self._headers_written = False
# ============================= #
self.flush(include_footers=True)
# === Add after self.flush === #
self._write_buffer = self._stream_queue
self.request.connection._write_buffer = self._stream_queue
# ============================ #
self.request.finish()
self._log()
self._finished = True
self.on_finish()
# Break up a reference cycle between this handler and the
# _ui_module closures to allow for faster GC on CPython.
self.ui = None
|
[
"def",
"finish",
"(",
"self",
")",
":",
"if",
"self",
".",
"_finished",
":",
"raise",
"RuntimeError",
"(",
"\"finish() called twice. May be caused \"",
"\"by using async operations without the \"",
"\"@asynchronous decorator.\"",
")",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'_stream_queue'",
")",
"or",
"not",
"self",
".",
"_stream_queue",
":",
"raise",
"RuntimeError",
"(",
"\"`_stream_queue` was not assigned, you should\"",
"\"call `write_stream_queue` to set.\"",
")",
"# === Replace `if not self._headers_written` === #",
"self",
".",
"set_status",
"(",
"200",
")",
"self",
".",
"set_header",
"(",
"\"Content-Type\"",
",",
"\"text/event-stream\"",
")",
"self",
".",
"set_header",
"(",
"\"Cache-Control\"",
",",
"\"no-cache\"",
")",
"self",
".",
"set_header",
"(",
"\"Access-Control-Allow-Origin\"",
",",
"\"*\"",
")",
"# ============================================== #",
"self",
".",
"request",
".",
"connection",
".",
"set_close_callback",
"(",
"None",
")",
"# === Add before self.flush === #",
"# Reset buffer",
"self",
".",
"_write_buffer",
"=",
"[",
"]",
"self",
".",
"_headers_written",
"=",
"False",
"# ============================= #",
"self",
".",
"flush",
"(",
"include_footers",
"=",
"True",
")",
"# === Add after self.flush === #",
"self",
".",
"_write_buffer",
"=",
"self",
".",
"_stream_queue",
"self",
".",
"request",
".",
"connection",
".",
"_write_buffer",
"=",
"self",
".",
"_stream_queue",
"# ============================ #",
"self",
".",
"request",
".",
"finish",
"(",
")",
"self",
".",
"_log",
"(",
")",
"self",
".",
"_finished",
"=",
"True",
"self",
".",
"on_finish",
"(",
")",
"# Break up a reference cycle between this handler and the",
"# _ui_module closures to allow for faster GC on CPython.",
"self",
".",
"ui",
"=",
"None"
] |
Finishes this response, ending the HTTP request.
|
[
"Finishes",
"this",
"response",
"ending",
"the",
"HTTP",
"request",
"."
] |
python
|
train
|
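The first guard above is the standard "finish exactly once" pattern. Stripped of the Tornado plumbing (_write_buffer, flush, connection), the shape reduces to a sketch like this:
class Response:
    def __init__(self):
        self._finished = False

    def finish(self):
        if self._finished:
            raise RuntimeError('finish() called twice.')
        self._finished = True
        print('response finished')

r = Response()
r.finish()
try:
    r.finish()
except RuntimeError as e:
    print(e)  # finish() called twice.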
klahnakoski/mo-files
|
mo_files/__init__.py
|
https://github.com/klahnakoski/mo-files/blob/f6974a997cdc9fdabccb60c19edee13356a5787a/mo_files/__init__.py#L162-L176
|
def find(self, pattern):
"""
:param pattern: REGULAR EXPRESSION TO MATCH NAME (NOT INCLUDING PATH)
:return: LIST OF File OBJECTS THAT HAVE MATCHING NAME
"""
output = []
def _find(dir):
if re.match(pattern, dir._filename.split("/")[-1]):
output.append(dir)
if dir.is_directory():
for c in dir.children:
_find(c)
_find(self)
return output
|
[
"def",
"find",
"(",
"self",
",",
"pattern",
")",
":",
"output",
"=",
"[",
"]",
"def",
"_find",
"(",
"dir",
")",
":",
"if",
"re",
".",
"match",
"(",
"pattern",
",",
"dir",
".",
"_filename",
".",
"split",
"(",
"\"/\"",
")",
"[",
"-",
"1",
"]",
")",
":",
"output",
".",
"append",
"(",
"dir",
")",
"if",
"dir",
".",
"is_directory",
"(",
")",
":",
"for",
"c",
"in",
"dir",
".",
"children",
":",
"_find",
"(",
"c",
")",
"_find",
"(",
"self",
")",
"return",
"output"
] |
:param pattern: REGULAR EXPRESSION TO MATCH NAME (NOT INCLUDING PATH)
:return: LIST OF File OBJECTS THAT HAVE MATCHING NAME
|
[
":",
"param",
"pattern",
":",
"REGULAR",
"EXPRESSION",
"TO",
"MATCH",
"NAME",
"(",
"NOT",
"INCLUDING",
"PATH",
")",
":",
"return",
":",
"LIST",
"OF",
"File",
"OBJECTS",
"THAT",
"HAVE",
"MATCHING",
"NAME"
] |
python
|
train
|
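For comparison, the same "match the basename against a regex" idea expressed over a real directory tree with os.walk instead of mo-files' File objects; like the original, re.match anchors the pattern at the start of the name:
import os
import re

def find(root, pattern):
    matched = []
    for dirpath, dirnames, filenames in os.walk(root):
        for name in dirnames + filenames:
            if re.match(pattern, name):
                matched.append(os.path.join(dirpath, name))
    return matched

print(find('.', r'.*\.py$'))  # every .py file or directory under '.'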
molmod/molmod
|
molmod/graphs.py
|
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1395-L1400
|
def iter_initial_relations(self, subject_graph):
"""Iterate over all valid initial relations for a match"""
if self.pattern_graph.num_edges != subject_graph.num_edges:
return # don't even try
for pair in CustomPattern.iter_initial_relations(self, subject_graph):
yield pair
|
[
"def",
"iter_initial_relations",
"(",
"self",
",",
"subject_graph",
")",
":",
"if",
"self",
".",
"pattern_graph",
".",
"num_edges",
"!=",
"subject_graph",
".",
"num_edges",
":",
"return",
"# don't even try",
"for",
"pair",
"in",
"CustomPattern",
".",
"iter_initial_relations",
"(",
"self",
",",
"subject_graph",
")",
":",
"yield",
"pair"
] |
Iterate over all valid initial relations for a match
|
[
"Iterate",
"over",
"all",
"valid",
"initial",
"relations",
"for",
"a",
"match"
] |
python
|
train
|
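The bare return above is a cheap pre-filter: returning from a generator before the first yield produces an empty iteration. A minimal demonstration of the same shape:
def iter_pairs(xs, ys):
    if len(xs) != len(ys):
        return  # don't even try -- yields nothing
    for pair in zip(xs, ys):
        yield pair

print(list(iter_pairs([1, 2], [3])))     # []
print(list(iter_pairs([1, 2], [3, 4])))  # [(1, 3), (2, 4)]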
Duke-GCB/DukeDSClient
|
ddsc/ddsclient.py
|
https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/ddsclient.py#L217-L228
|
def run(self, args):
"""
Give the user with user_full_name the auth_role permissions on the remote project with project_name.
:param args: Namespace arguments parsed from the command line
"""
email = args.email # email of person to give permissions, will be None if username is specified
username = args.username # username of person to give permissions, will be None if email is specified
auth_role = args.auth_role # type of permission(project_admin)
project = self.fetch_project(args, must_exist=True, include_children=False)
user = self.remote_store.lookup_or_register_user_by_email_or_username(email, username)
self.remote_store.set_user_project_permission(project, user, auth_role)
print(u'Gave user {} {} permissions for project {}.'.format(user.full_name, auth_role, project.name))
|
[
"def",
"run",
"(",
"self",
",",
"args",
")",
":",
"email",
"=",
"args",
".",
"email",
"# email of person to give permissions, will be None if username is specified",
"username",
"=",
"args",
".",
"username",
"# username of person to give permissions, will be None if email is specified",
"auth_role",
"=",
"args",
".",
"auth_role",
"# type of permission(project_admin)",
"project",
"=",
"self",
".",
"fetch_project",
"(",
"args",
",",
"must_exist",
"=",
"True",
",",
"include_children",
"=",
"False",
")",
"user",
"=",
"self",
".",
"remote_store",
".",
"lookup_or_register_user_by_email_or_username",
"(",
"email",
",",
"username",
")",
"self",
".",
"remote_store",
".",
"set_user_project_permission",
"(",
"project",
",",
"user",
",",
"auth_role",
")",
"print",
"(",
"u'Gave user {} {} permissions for project {}.'",
".",
"format",
"(",
"user",
".",
"full_name",
",",
"auth_role",
",",
"project",
".",
"name",
")",
")"
] |
Give the user with user_full_name the auth_role permissions on the remote project with project_name.
:param args: Namespace arguments parsed from the command line
|
[
"Give",
"the",
"user",
"with",
"user_full_name",
"the",
"auth_role",
"permissions",
"on",
"the",
"remote",
"project",
"with",
"project_name",
".",
":",
"param",
"args",
"Namespace",
"arguments",
"parsed",
"from",
"the",
"command",
"line"
] |
python
|
train
|
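The email/username "exactly one is set, the other is None" convention in run() is typically produced by an argparse mutually exclusive group. A hedged sketch of that wiring; the real ddsclient argument setup may differ:
import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--email')     # None unless given
group.add_argument('--username')  # None unless given
parser.add_argument('--auth-role', dest='auth_role', default='project_admin')

args = parser.parse_args(['--email', 'user@example.org'])
print(args.email, args.username, args.auth_role)
# user@example.org None project_admin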
google/pyringe
|
pyringe/plugins/read_only.py
|
https://github.com/google/pyringe/blob/76dff5d1ac29cd5e7bf32677654a83291a15ad8a/pyringe/plugins/read_only.py#L52-L60
|
def Backtrace(self, to_string=False):
"""Get a backtrace of the current position."""
if self.inferior.is_running:
res = self.inferior.Backtrace()
if to_string:
return res
print res
else:
logging.error('Not attached to any process.')
|
[
"def",
"Backtrace",
"(",
"self",
",",
"to_string",
"=",
"False",
")",
":",
"if",
"self",
".",
"inferior",
".",
"is_running",
":",
"res",
"=",
"self",
".",
"inferior",
".",
"Backtrace",
"(",
")",
"if",
"to_string",
":",
"return",
"res",
"print",
"res",
"else",
":",
"logging",
".",
"error",
"(",
"'Not attached to any process.'",
")"
] |
Get a backtrace of the current position.
|
[
"Get",
"a",
"backtrace",
"of",
"the",
"current",
"position",
"."
] |
python
|
train
|
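The bare "print res" marks this record as Python 2 code. The same to-string-or-print pattern in Python 3, with a stand-in for pyringe's inferior object:
import logging

def backtrace(inferior, to_string=False):
    if inferior.is_running:
        res = inferior.Backtrace()
        if to_string:
            return res
        print(res)
    else:
        logging.error('Not attached to any process.')

class FakeInferior:  # illustrative stand-in, not pyringe's class
    is_running = True
    def Backtrace(self):
        return '#0  main ()'

print(backtrace(FakeInferior(), to_string=True))  # '#0  main ()'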
macbre/sql-metadata
|
sql_metadata.py
|
https://github.com/macbre/sql-metadata/blob/4b7b4ae0a961d568075aefe78535cf5aee74583c/sql_metadata.py#L30-L48
|
def preprocess_query(query):
"""
Perform initial query cleanup
:type query str
:rtype str
"""
# 1. remove aliases
# FROM `dimension_wikis` `dw`
# INNER JOIN `fact_wam_scores` `fwN`
query = re.sub(r'(\s(FROM|JOIN)\s`[^`]+`)\s`[^`]+`', r'\1', query, flags=re.IGNORECASE)
# 2. `database`.`table` notation -> database.table
query = re.sub(r'`([^`]+)`\.`([^`]+)`', r'\1.\2', query)
# 3. database.table notation -> table
# query = re.sub(r'([a-z_0-9]+)\.([a-z_0-9]+)', r'\2', query, flags=re.IGNORECASE)
return query
|
[
"def",
"preprocess_query",
"(",
"query",
")",
":",
"# 1. remove aliases",
"# FROM `dimension_wikis` `dw`",
"# INNER JOIN `fact_wam_scores` `fwN`",
"query",
"=",
"re",
".",
"sub",
"(",
"r'(\\s(FROM|JOIN)\\s`[^`]+`)\\s`[^`]+`'",
",",
"r'\\1'",
",",
"query",
",",
"flags",
"=",
"re",
".",
"IGNORECASE",
")",
"# 2. `database`.`table` notation -> database.table",
"query",
"=",
"re",
".",
"sub",
"(",
"r'`([^`]+)`\\.`([^`]+)`'",
",",
"r'\\1.\\2'",
",",
"query",
")",
"# 2. database.table notation -> table",
"# query = re.sub(r'([a-z_0-9]+)\\.([a-z_0-9]+)', r'\\2', query, flags=re.IGNORECASE)",
"return",
"query"
] |
Perform initial query cleanup
:type query str
:rtype str
|
[
"Perform",
"initial",
"query",
"cleanup"
] |
python
|
train
|
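A quick, runnable demonstration of the two substitutions on a sample query:
import re

query = ("SELECT * FROM `dimension_wikis` `dw` "
         "INNER JOIN `fact_wam_scores` `fwN` ON `dw`.`id` = `fwN`.`id`")
# 1. drop the backticked alias after FROM/JOIN
query = re.sub(r'(\s(FROM|JOIN)\s`[^`]+`)\s`[^`]+`', r'\1', query, flags=re.IGNORECASE)
# 2. `database`.`table` -> database.table
query = re.sub(r'`([^`]+)`\.`([^`]+)`', r'\1.\2', query)
print(query)
# SELECT * FROM `dimension_wikis` INNER JOIN `fact_wam_scores` ON dw.id = fwN.id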
phaethon/kamene
|
kamene/contrib/igmpv3.py
|
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/igmpv3.py#L123-L141
|
def float_encode(self, value):
"""Convert the integer value to its IGMPv3 encoded time value if needed.
If value < 128, return the value specified. If >= 128, encode as a floating
point value. Value can be 0 - 31744.
"""
if value < 128:
code = value
elif value > 31743:
code = 255
else:
exp=0
value>>=3
while(value>31):
exp+=1
value>>=1
exp<<=4
code = 0x80 | exp | (value & 0x0F)
return code
|
[
"def",
"float_encode",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"<",
"128",
":",
"code",
"=",
"value",
"elif",
"value",
">",
"31743",
":",
"code",
"=",
"255",
"else",
":",
"exp",
"=",
"0",
"value",
">>=",
"3",
"while",
"(",
"value",
">",
"31",
")",
":",
"exp",
"+=",
"1",
"value",
">>=",
"1",
"exp",
"<<=",
"4",
"code",
"=",
"0x80",
"|",
"exp",
"|",
"(",
"value",
"&",
"0x0F",
")",
"return",
"code"
] |
Convert the integer value to its IGMPv3 encoded time value if needed.
If value < 128, return the value specified. If >= 128, encode as a floating
point value. Value can be 0 - 31744.
|
[
"Convert",
"the",
"integer",
"value",
"to",
"its",
"IGMPv3",
"encoded",
"time",
"value",
"if",
"needed",
".",
"If",
"value",
"<",
"128",
"return",
"the",
"value",
"specified",
".",
"If",
">",
"=",
"128",
"encode",
"as",
"a",
"floating",
"point",
"value",
".",
"Value",
"can",
"be",
"0",
"-",
"31744",
"."
] |
python
|
train
|
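For reference, a hedged sketch of the inverse transform, decoding the 8-bit IGMPv3 "float" as (mant | 0x10) << (exp + 3), the usual RFC 3376 formula; note the encoding is lossy, so round-trips recover only the truncated value:
def float_decode(code):
    if code < 128:
        return code
    mant = code & 0x0F         # low 4 bits: mantissa
    exp = (code >> 4) & 0x07   # next 3 bits: exponent
    return (mant | 0x10) << (exp + 3)

print(float_decode(100))   # 100 (small values pass through)
print(float_decode(0x80))  # 128 (0x80 is float_encode(128))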
DocNow/twarc
|
twarc/client.py
|
https://github.com/DocNow/twarc/blob/47dd87d0c00592a4d583412c9d660ba574fc6f26/twarc/client.py#L254-L278
|
def follower_ids(self, user):
"""
Returns Twitter user id lists for the specified user's followers.
A user can be specified using their screen_name or user_id
"""
user = str(user)
user = user.lstrip('@')
url = 'https://api.twitter.com/1.1/followers/ids.json'
if re.match(r'^\d+$', user):
params = {'user_id': user, 'cursor': -1}
else:
params = {'screen_name': user, 'cursor': -1}
while params['cursor'] != 0:
try:
resp = self.get(url, params=params, allow_404=True)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
log.info("no users matching %s", screen_name)
raise e
user_ids = resp.json()
for user_id in user_ids['ids']:
yield str_type(user_id)
params['cursor'] = user_ids['next_cursor']
|
[
"def",
"follower_ids",
"(",
"self",
",",
"user",
")",
":",
"user",
"=",
"str",
"(",
"user",
")",
"user",
"=",
"user",
".",
"lstrip",
"(",
"'@'",
")",
"url",
"=",
"'https://api.twitter.com/1.1/followers/ids.json'",
"if",
"re",
".",
"match",
"(",
"r'^\\d+$'",
",",
"user",
")",
":",
"params",
"=",
"{",
"'user_id'",
":",
"user",
",",
"'cursor'",
":",
"-",
"1",
"}",
"else",
":",
"params",
"=",
"{",
"'screen_name'",
":",
"user",
",",
"'cursor'",
":",
"-",
"1",
"}",
"while",
"params",
"[",
"'cursor'",
"]",
"!=",
"0",
":",
"try",
":",
"resp",
"=",
"self",
".",
"get",
"(",
"url",
",",
"params",
"=",
"params",
",",
"allow_404",
"=",
"True",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"e",
":",
"if",
"e",
".",
"response",
".",
"status_code",
"==",
"404",
":",
"log",
".",
"info",
"(",
"\"no users matching %s\"",
",",
"screen_name",
")",
"raise",
"e",
"user_ids",
"=",
"resp",
".",
"json",
"(",
")",
"for",
"user_id",
"in",
"user_ids",
"[",
"'ids'",
"]",
":",
"yield",
"str_type",
"(",
"user_id",
")",
"params",
"[",
"'cursor'",
"]",
"=",
"user_ids",
"[",
"'next_cursor'",
"]"
] |
Returns Twitter user id lists for the specified user's followers.
A user can be specified using their screen_name or user_id
|
[
"Returns",
"Twitter",
"user",
"id",
"lists",
"for",
"the",
"specified",
"user",
"s",
"followers",
".",
"A",
"user",
"can",
"be",
"a",
"specific",
"using",
"their",
"screen_name",
"or",
"user_id"
] |
python
|
train
|
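The cursoring loop follows Twitter's convention: start at cursor -1, stop when next_cursor comes back 0. A self-contained sketch of the same shape over fake pages:
PAGES = {
    -1: {'ids': [1, 2], 'next_cursor': 7},
    7:  {'ids': [3], 'next_cursor': 0},
}

def follower_ids():
    cursor = -1
    while cursor != 0:
        page = PAGES[cursor]
        for user_id in page['ids']:
            yield str(user_id)
        cursor = page['next_cursor']

print(list(follower_ids()))  # ['1', '2', '3']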
ic-labs/django-icekit
|
icekit/content_collections/abstract_models.py
|
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/content_collections/abstract_models.py#L159-L181
|
def get_response(self, request, parent, *args, **kwargs):
"""
Render this collected content to a response.
:param request: the request
:param parent: the parent collection
:param args:
:param kwargs:
:return:
"""
context = {
'page': self,
}
try:
return TemplateResponse(
request,
self.get_layout_template_name(),
context
)
except AttributeError:
raise AttributeError("You need to define "
"`get_layout_template_name()` on your `%s` model, "
"or override `get_response()`" % type(self).__name__)
|
[
"def",
"get_response",
"(",
"self",
",",
"request",
",",
"parent",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"context",
"=",
"{",
"'page'",
":",
"self",
",",
"}",
"try",
":",
"return",
"TemplateResponse",
"(",
"request",
",",
"self",
".",
"get_layout_template_name",
"(",
")",
",",
"context",
")",
"except",
"AttributeError",
":",
"raise",
"AttributeError",
"(",
"\"You need to define \"",
"\"`get_layout_template_name()` on your `%s` model, \"",
"\"or override `get_response()`\"",
"%",
"type",
"(",
"self",
")",
".",
"__name__",
")"
] |
Render this collected content to a response.
:param request: the request
:param parent: the parent collection
:param args:
:param kwargs:
:return:
|
[
"Render",
"this",
"collected",
"content",
"to",
"a",
"response",
"."
] |
python
|
train
|
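One caveat in the code above: except AttributeError also swallows AttributeErrors raised inside get_layout_template_name() itself. A narrower probe avoids that; a standalone sketch of the pattern, with illustrative names rather than django-icekit's API:
class Page:
    def get_response(self):
        get_template_name = getattr(self, 'get_layout_template_name', None)
        if get_template_name is None:
            raise AttributeError(
                "You need to define `get_layout_template_name()` on your "
                "`%s` model, or override `get_response()`" % type(self).__name__)
        # only errors from rendering itself propagate now
        return 'rendering with ' + get_template_name()

class ArticlePage(Page):
    def get_layout_template_name(self):
        return 'article.html'

print(ArticlePage().get_response())  # rendering with article.html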
cuihantao/andes
|
andes/variables/call.py
|
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/variables/call.py#L178-L188
|
def _compile_bus_injection(self):
"""Impose injections on buses"""
string = '"""\n'
for device, series in zip(self.devices, self.series):
if series:
string += 'system.' + device + '.gcall(system.dae)\n'
string += '\n'
string += 'system.dae.reset_small_g()\n'
string += self.gisland
string += '"""'
self.bus_injection = compile(eval(string), '', 'exec')
|
[
"def",
"_compile_bus_injection",
"(",
"self",
")",
":",
"string",
"=",
"'\"\"\"\\n'",
"for",
"device",
",",
"series",
"in",
"zip",
"(",
"self",
".",
"devices",
",",
"self",
".",
"series",
")",
":",
"if",
"series",
":",
"string",
"+=",
"'system.'",
"+",
"device",
"+",
"'.gcall(system.dae)\\n'",
"string",
"+=",
"'\\n'",
"string",
"+=",
"'system.dae.reset_small_g()\\n'",
"string",
"+=",
"self",
".",
"gisland",
"string",
"+=",
"'\"\"\"'",
"self",
".",
"bus_injection",
"=",
"compile",
"(",
"eval",
"(",
"string",
")",
",",
"''",
",",
"'exec'",
")"
] |
Impose injections on buses
|
[
"Impose",
"injections",
"on",
"buses"
] |
python
|
train
|
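The '"""' wrapping plus eval() is a roundabout way to turn the generated source into a plain string before compile(). A minimal reproduction of the construct (it breaks if the generated code itself contains triple quotes):
# build a triple-quoted string literal, line by line
string = '"""\n'
string += 'print("gcall for device_a")\n'
string += 'print("gcall for device_b")\n'
string += '"""'
# eval() unwraps the literal to the inner source; compile() makes a code object
code_obj = compile(eval(string), '', 'exec')
exec(code_obj)  # runs both print calls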
polysquare/polysquare-setuptools-lint
|
polysquare_setuptools_lint/__init__.py
|
https://github.com/polysquare/polysquare-setuptools-lint/blob/5df5a6401c7ad6a90b42230eeb99c82cc56952b6/polysquare_setuptools_lint/__init__.py#L457-L463
|
def _is_excluded(filename, exclusions):
"""Return true if filename matches any of exclusions."""
for exclusion in exclusions:
if fnmatch(filename, exclusion):
return True
return False
|
[
"def",
"_is_excluded",
"(",
"filename",
",",
"exclusions",
")",
":",
"for",
"exclusion",
"in",
"exclusions",
":",
"if",
"fnmatch",
"(",
"filename",
",",
"exclusion",
")",
":",
"return",
"True",
"return",
"False"
] |
Return true if filename matches any of exclusions.
|
[
"Return",
"true",
"if",
"filename",
"matches",
"any",
"of",
"exclusions",
"."
] |
python
|
train
|
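The loop above is equivalent to a single any() over the exclusion globs:
from fnmatch import fnmatch

def _is_excluded(filename, exclusions):
    return any(fnmatch(filename, exclusion) for exclusion in exclusions)

print(_is_excluded('build/temp/a.o', ['build/*', '*.pyc']))  # True
print(_is_excluded('src/main.py', ['build/*', '*.pyc']))     # False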
Azure/azure-multiapi-storage-python
|
azure/multiapi/storage/v2015_04_05/table/tableservice.py
|
https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/table/tableservice.py#L353-L383
|
def list_tables(self, num_results=None, marker=None, timeout=None):
'''
Returns a generator to list the tables. The generator will lazily follow
the continuation tokens returned by the service and stop when all tables
have been returned or num_results is reached.
If num_results is specified and the account has more than that number of
tables, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param int num_results:
The maximum number of tables to return.
:param marker:
An opaque continuation object. This value can be retrieved from the
next_marker field of a previous generator object if num_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:type marker: obj
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
:return: A generator which produces :class:`~azure.storage.models.table.Table` objects.
:rtype: :class:`~azure.storage.models.ListGenerator`:
'''
kwargs = {'max_results': num_results, 'marker': marker, 'timeout': timeout}
resp = self._list_tables(**kwargs)
return ListGenerator(resp, self._list_tables, (), kwargs)
|
[
"def",
"list_tables",
"(",
"self",
",",
"num_results",
"=",
"None",
",",
"marker",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"kwargs",
"=",
"{",
"'max_results'",
":",
"num_results",
",",
"'marker'",
":",
"marker",
",",
"'timeout'",
":",
"timeout",
"}",
"resp",
"=",
"self",
".",
"_list_tables",
"(",
"*",
"*",
"kwargs",
")",
"return",
"ListGenerator",
"(",
"resp",
",",
"self",
".",
"_list_tables",
",",
"(",
")",
",",
"kwargs",
")"
] |
Returns a generator to list the tables. The generator will lazily follow
the continuation tokens returned by the service and stop when all tables
have been returned or num_results is reached.
If num_results is specified and the account has more than that number of
tables, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param int num_results:
The maximum number of tables to return.
:param marker:
An opaque continuation object. This value can be retrieved from the
next_marker field of a previous generator object if num_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:type marker: obj
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
:return: A generator which produces :class:`~azure.storage.models.table.Table` objects.
:rtype: :class:`~azure.storage.models.ListGenerator`:
|
[
"Returns",
"a",
"generator",
"to",
"list",
"the",
"tables",
".",
"The",
"generator",
"will",
"lazily",
"follow",
"the",
"continuation",
"tokens",
"returned",
"by",
"the",
"service",
"and",
"stop",
"when",
"all",
"tables",
"have",
"been",
"returned",
"or",
"num_results",
"is",
"reached",
"."
] |
python
|
train
|
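A hedged sketch of what a ListGenerator-style wrapper does: call the list function, yield each result, and lazily follow next_marker until it runs out. This is the shape of the mechanism, not the azure-storage implementation:
def list_generator(list_func, **kwargs):
    while True:
        resp = list_func(**kwargs)
        for item in resp['items']:
            yield item
        if not resp.get('next_marker'):
            return
        kwargs['marker'] = resp['next_marker']  # resume where we stopped

def fake_list_tables(marker=None, max_results=None, timeout=None):
    pages = {None: {'items': ['t1', 't2'], 'next_marker': 'm1'},
             'm1': {'items': ['t3'], 'next_marker': None}}
    return pages[marker]

print(list(list_generator(fake_list_tables)))  # ['t1', 't2', 't3']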
abe-winter/pg13-py
|
pg13/sqparse2.py
|
https://github.com/abe-winter/pg13-py/blob/c78806f99f35541a8756987e86edca3438aa97f5/pg13/sqparse2.py#L332-L336
|
def p_assignlist(self,t):
"assignlist : assignlist ',' assign \n | assign"
if len(t)==4: t[0] = t[1] + [t[3]]
elif len(t)==2: t[0] = [t[1]]
else: raise NotImplementedError('unk_len', len(t)) # pragma: no cover
|
[
"def",
"p_assignlist",
"(",
"self",
",",
"t",
")",
":",
"if",
"len",
"(",
"t",
")",
"==",
"4",
":",
"t",
"[",
"0",
"]",
"=",
"t",
"[",
"1",
"]",
"+",
"[",
"t",
"[",
"3",
"]",
"]",
"elif",
"len",
"(",
"t",
")",
"==",
"2",
":",
"t",
"[",
"0",
"]",
"=",
"[",
"t",
"[",
"1",
"]",
"]",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'unk_len'",
",",
"len",
"(",
"t",
")",
")",
"# pragma: no cover"
] |
assignlist : assignlist ',' assign \n | assign
|
[
"assignlist",
":",
"assignlist",
"assign",
"\\",
"n",
"|",
"assign"
] |
python
|
train
|
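The docstring-as-grammar convention above is PLY's: the production lives in the function's docstring and t is indexed like the rule. The accumulation logic can be simulated with plain lists to show how the left-recursive rule builds up the assignment list:
def reduce_assignlist(t):
    # t[0] is the production result; t[1..] are the matched symbols
    if len(t) == 4:            # assignlist ',' assign
        t[0] = t[1] + [t[3]]   # append to the already-built list
    elif len(t) == 2:          # assign
        t[0] = [t[1]]          # start a new list
    return t[0]

print(reduce_assignlist([None, 'a=1']))                # ['a=1']
print(reduce_assignlist([None, ['a=1'], ',', 'b=2']))  # ['a=1', 'b=2']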