Dataset columns:
repo: stringlengths 7–55
path: stringlengths 4–223
url: stringlengths 87–315
code: stringlengths 75–104k
code_tokens: list
docstring: stringlengths 1–46.9k
docstring_tokens: list
language: stringclasses (1 value)
partition: stringclasses (3 values)
avg_line_len: float64 7.91–980
minhhoit/yacms
yacms/twitter/admin.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/twitter/admin.py#L52-L62
def save_model(self, request, obj, form, change):
    """
    Sends a tweet with the title/short_url if applicable.
    """
    super(TweetableAdminMixin, self).save_model(request, obj, form, change)
    if Api and request.POST.get("send_tweet", False):
        auth_settings = get_auth_settings()
        obj.set_short_url()
        message = truncatechars(obj, 140 - len(obj.short_url) - 1)
        api = Api(*auth_settings)
        api.PostUpdate("%s %s" % (message, obj.short_url))
[ "def", "save_model", "(", "self", ",", "request", ",", "obj", ",", "form", ",", "change", ")", ":", "super", "(", "TweetableAdminMixin", ",", "self", ")", ".", "save_model", "(", "request", ",", "obj", ",", "form", ",", "change", ")", "if", "Api", "and", "request", ".", "POST", ".", "get", "(", "\"send_tweet\"", ",", "False", ")", ":", "auth_settings", "=", "get_auth_settings", "(", ")", "obj", ".", "set_short_url", "(", ")", "message", "=", "truncatechars", "(", "obj", ",", "140", "-", "len", "(", "obj", ".", "short_url", ")", "-", "1", ")", "api", "=", "Api", "(", "*", "auth_settings", ")", "api", ".", "PostUpdate", "(", "\"%s %s\"", "%", "(", "message", ",", "obj", ".", "short_url", ")", ")" ]
Sends a tweet with the title/short_url if applicable.
[ "Sends", "a", "tweet", "with", "the", "title", "/", "short_url", "if", "applicable", "." ]
python
train
46.818182
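A minimal sketch of how a mixin like this is typically attached to a Django ModelAdmin; the Post model and its admin options below are hypothetical, not taken from yacms.

from django.contrib import admin

from yacms.twitter.admin import TweetableAdminMixin
from myapp.models import Post  # hypothetical model exposing set_short_url()


@admin.register(Post)
class PostAdmin(TweetableAdminMixin, admin.ModelAdmin):
    # Saving from the admin with "send_tweet" checked in the POST data
    # triggers the tweet in save_model() above.
    list_display = ("title",)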
pyQode/pyqode.core
pyqode/core/widgets/splittable_tab_widget.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/splittable_tab_widget.py#L749-L757
def add_context_action(self, action):
    """
    Adds a custom context menu action

    :param action: action to add.
    """
    self.main_tab_widget.context_actions.append(action)
    for child_splitter in self.child_splitters:
        child_splitter.add_context_action(action)
[ "def", "add_context_action", "(", "self", ",", "action", ")", ":", "self", ".", "main_tab_widget", ".", "context_actions", ".", "append", "(", "action", ")", "for", "child_splitter", "in", "self", ".", "child_splitters", ":", "child_splitter", ".", "add_context_action", "(", "action", ")" ]
Adds a custom context menu action

:param action: action to add.
[ "Adds", "a", "custom", "context", "menu", "action" ]
python
train
33.333333
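A short usage sketch, assuming SplittableTabWidget is exported from pyqode.core.widgets and a Qt application is already running; the action label is illustrative.

from pyqode.qt import QtWidgets
from pyqode.core.widgets import SplittableTabWidget

app = QtWidgets.QApplication([])
tab_widget = SplittableTabWidget()
# The action is appended to the main tab widget's context menu and
# propagated recursively to every child splitter.
action = QtWidgets.QAction('Custom action', tab_widget)
tab_widget.add_context_action(action)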
Thermondo/django-heroku-connect
heroku_connect/models.py
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/models.py#L173-L212
def capture_update_from_model(cls, table_name, record_id, *, update_fields=()):
    """Create a fresh update record from the current model state in the database.

    For read-write connected models, this will lead to the attempted update of the
    values of a corresponding object in Salesforce.

    Args:
        table_name (str): The name of the table backing the connected model (without schema)
        record_id (int): The primary id of the connected model
        update_fields (Iterable[str]): If given, the names of fields that will be included
            in the write record

    Returns:
        A list of the created TriggerLog entries (usually one).

    Raises:
        LookupError: if ``table_name`` does not belong to a connected model
    """
    include_cols = ()
    if update_fields:
        model_cls = get_connected_model_for_table_name(table_name)
        include_cols = cls._fieldnames_to_colnames(model_cls, update_fields)
    raw_query = sql.SQL("""
        SELECT {schema}.hc_capture_update_from_row(
            hstore({schema}.{table_name}.*),
            %(table_name)s,
            ARRAY[{include_cols}]::text[]  -- cast to type expected by stored procedure
        ) AS id
        FROM {schema}.{table_name}
        WHERE id = %(record_id)s
    """).format(
        schema=sql.Identifier(settings.HEROKU_CONNECT_SCHEMA),
        table_name=sql.Identifier(table_name),
        include_cols=sql.SQL(', ').join(sql.Identifier(col) for col in include_cols),
    )
    params = {'record_id': record_id, 'table_name': table_name}
    result_qs = TriggerLog.objects.raw(raw_query, params)
    return list(result_qs)
[ "def", "capture_update_from_model", "(", "cls", ",", "table_name", ",", "record_id", ",", "*", ",", "update_fields", "=", "(", ")", ")", ":", "include_cols", "=", "(", ")", "if", "update_fields", ":", "model_cls", "=", "get_connected_model_for_table_name", "(", "table_name", ")", "include_cols", "=", "cls", ".", "_fieldnames_to_colnames", "(", "model_cls", ",", "update_fields", ")", "raw_query", "=", "sql", ".", "SQL", "(", "\"\"\"\n SELECT {schema}.hc_capture_update_from_row(\n hstore({schema}.{table_name}.*),\n %(table_name)s,\n ARRAY[{include_cols}]::text[] -- cast to type expected by stored procedure\n ) AS id\n FROM {schema}.{table_name}\n WHERE id = %(record_id)s\n \"\"\"", ")", ".", "format", "(", "schema", "=", "sql", ".", "Identifier", "(", "settings", ".", "HEROKU_CONNECT_SCHEMA", ")", ",", "table_name", "=", "sql", ".", "Identifier", "(", "table_name", ")", ",", "include_cols", "=", "sql", ".", "SQL", "(", "', '", ")", ".", "join", "(", "sql", ".", "Identifier", "(", "col", ")", "for", "col", "in", "include_cols", ")", ",", ")", "params", "=", "{", "'record_id'", ":", "record_id", ",", "'table_name'", ":", "table_name", "}", "result_qs", "=", "TriggerLog", ".", "objects", ".", "raw", "(", "raw_query", ",", "params", ")", "return", "list", "(", "result_qs", ")" ]
Create a fresh update record from the current model state in the database.

For read-write connected models, this will lead to the attempted update of the values of a corresponding object in Salesforce.

Args:
    table_name (str): The name of the table backing the connected model (without schema)
    record_id (int): The primary id of the connected model
    update_fields (Iterable[str]): If given, the names of fields that will be included in the write record

Returns:
    A list of the created TriggerLog entries (usually one).

Raises:
    LookupError: if ``table_name`` does not belong to a connected model
[ "Create", "a", "fresh", "update", "record", "from", "the", "current", "model", "state", "in", "the", "database", "." ]
python
train
43.5
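A hypothetical call site for the classmethod above; the table name and field names are illustrative, and TriggerLog is assumed to be the model class that exposes it.

# Queue a Heroku Connect write for one record, restricted to two fields.
logs = TriggerLog.capture_update_from_model(
    'contact',                      # table backing the connected model (no schema)
    record_id=42,
    update_fields=('first_name', 'email'),
)
print([entry.id for entry in logs])  # usually a single TriggerLog entry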
crocs-muni/roca
roca/detect.py
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L132-L147
def defvalkey(js, key, default=None, take_none=True):
    """
    Returns js[key] if set, otherwise default. Note js[key] can be None.
    :param js:
    :param key:
    :param default:
    :param take_none:
    :return:
    """
    if js is None:
        return default
    if key not in js:
        return default
    if js[key] is None and not take_none:
        return default
    return js[key]
[ "def", "defvalkey", "(", "js", ",", "key", ",", "default", "=", "None", ",", "take_none", "=", "True", ")", ":", "if", "js", "is", "None", ":", "return", "default", "if", "key", "not", "in", "js", ":", "return", "default", "if", "js", "[", "key", "]", "is", "None", "and", "not", "take_none", ":", "return", "default", "return", "js", "[", "key", "]" ]
Returns js[key] if set, otherwise default. Note js[key] can be None.

:param js:
:param key:
:param default:
:param take_none:
:return:
[ "Returns", "js", "[", "key", "]", "if", "set", "otherwise", "default", ".", "Note", "js", "[", "key", "]", "can", "be", "None", ".", ":", "param", "js", ":", ":", "param", "key", ":", ":", "param", "default", ":", ":", "param", "take_none", ":", ":", "return", ":" ]
python
train
24
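The helper above is self-contained, so its behaviour can be shown directly:

js = {'a': 1, 'b': None}
defvalkey(js, 'a')                               # -> 1
defvalkey(js, 'missing', default=0)              # -> 0
defvalkey(js, 'b')                               # -> None (take_none=True keeps it)
defvalkey(js, 'b', default=0, take_none=False)   # -> 0
defvalkey(None, 'a', default='fallback')         # -> 'fallback'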
backbohne/docx-xslt
docxxslt/engines.py
https://github.com/backbohne/docx-xslt/blob/d4cc76776a75b8213660c3c1717d42afe5189e15/docxxslt/engines.py#L63-L91
def xsl_elements(self):
    """Find all "XSL"-styled runs, normalize the related paragraph, and return a list of XslElements"""
    def append_xsl_elements(xsl_elements, r, xsl):
        if r is not None:
            r.xpath('.//w:t', namespaces=self.namespaces)[0].text = xsl
            xe = XslElement(r, logger=self.logger)
            xsl_elements.append(xe)
        return None, ''

    if not getattr(self, '_xsl_elements', None):
        xsl_elements = []
        for p in self.root.xpath('.//w:p', namespaces=self.namespaces):
            xsl_r, xsl = None, ''
            for r in p:
                # find first XSL run and add all XSL meta text
                text = ''.join(t.text for t in r.xpath('.//w:t', namespaces=self.namespaces))
                if r.xpath('.//w:rPr/w:rStyle[@w:val="%s"]' % self.style, namespaces=self.namespaces):
                    xsl += text
                    if xsl_r is None and text:
                        xsl_r = r
                    else:
                        r.getparent().remove(r)
                elif text:
                    xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl)
            xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl)
        self._xsl_elements = xsl_elements
    return self._xsl_elements
[ "def", "xsl_elements", "(", "self", ")", ":", "def", "append_xsl_elements", "(", "xsl_elements", ",", "r", ",", "xsl", ")", ":", "if", "r", "is", "not", "None", ":", "r", ".", "xpath", "(", "'.//w:t'", ",", "namespaces", "=", "self", ".", "namespaces", ")", "[", "0", "]", ".", "text", "=", "xsl", "xe", "=", "XslElement", "(", "r", ",", "logger", "=", "self", ".", "logger", ")", "xsl_elements", ".", "append", "(", "xe", ")", "return", "None", ",", "''", "if", "not", "getattr", "(", "self", ",", "'_xsl_elements'", ",", "None", ")", ":", "xsl_elements", "=", "[", "]", "for", "p", "in", "self", ".", "root", ".", "xpath", "(", "'.//w:p'", ",", "namespaces", "=", "self", ".", "namespaces", ")", ":", "xsl_r", ",", "xsl", "=", "None", ",", "''", "for", "r", "in", "p", ":", "# find first XSL run and add all XSL meta text", "text", "=", "''", ".", "join", "(", "t", ".", "text", "for", "t", "in", "r", ".", "xpath", "(", "'.//w:t'", ",", "namespaces", "=", "self", ".", "namespaces", ")", ")", "if", "r", ".", "xpath", "(", "'.//w:rPr/w:rStyle[@w:val=\"%s\"]'", "%", "self", ".", "style", ",", "namespaces", "=", "self", ".", "namespaces", ")", ":", "xsl", "+=", "text", "if", "xsl_r", "is", "None", "and", "text", ":", "xsl_r", "=", "r", "else", ":", "r", ".", "getparent", "(", ")", ".", "remove", "(", "r", ")", "elif", "text", ":", "xsl_r", ",", "xsl", "=", "append_xsl_elements", "(", "xsl_elements", ",", "xsl_r", ",", "xsl", ")", "xsl_r", ",", "xsl", "=", "append_xsl_elements", "(", "xsl_elements", ",", "xsl_r", ",", "xsl", ")", "self", ".", "_xsl_elements", "=", "xsl_elements", "return", "self", ".", "_xsl_elements" ]
Find all "XSL"-styled runs, normalize the related paragraph, and return a list of XslElements
[ "Find", "all", "XSL", "styled", "runs", "normalize", "related", "paragraph", "and", "returns", "list", "of", "XslElements" ]
python
train
46.827586
ultrabug/py3status
py3status/module.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/module.py#L270-L301
def set_updated(self):
    """
    Mark the module as updated.
    We check if the actual content has changed and if so we trigger an
    update in py3status.
    """
    # get latest output
    output = []
    for method in self.methods.values():
        data = method["last_output"]
        if isinstance(data, list):
            if self.testing and data:
                data[0]["cached_until"] = method.get("cached_until")
            output.extend(data)
        else:
            # if the output is not 'valid' then don't add it.
            if data.get("full_text") or "separator" in data:
                if self.testing:
                    data["cached_until"] = method.get("cached_until")
                output.append(data)
    # if changed store and force display update.
    if output != self.last_output:
        # has the module's output become urgent?
        # we only care about the update where this first happens,
        # not any updates after that.
        urgent = True in [x.get("urgent") for x in output]
        if urgent != self.urgent:
            self.urgent = urgent
        else:
            urgent = False
        self.last_output = output
        self._py3_wrapper.notify_update(self.module_full_name, urgent)
[ "def", "set_updated", "(", "self", ")", ":", "# get latest output", "output", "=", "[", "]", "for", "method", "in", "self", ".", "methods", ".", "values", "(", ")", ":", "data", "=", "method", "[", "\"last_output\"", "]", "if", "isinstance", "(", "data", ",", "list", ")", ":", "if", "self", ".", "testing", "and", "data", ":", "data", "[", "0", "]", "[", "\"cached_until\"", "]", "=", "method", ".", "get", "(", "\"cached_until\"", ")", "output", ".", "extend", "(", "data", ")", "else", ":", "# if the output is not 'valid' then don't add it.", "if", "data", ".", "get", "(", "\"full_text\"", ")", "or", "\"separator\"", "in", "data", ":", "if", "self", ".", "testing", ":", "data", "[", "\"cached_until\"", "]", "=", "method", ".", "get", "(", "\"cached_until\"", ")", "output", ".", "append", "(", "data", ")", "# if changed store and force display update.", "if", "output", "!=", "self", ".", "last_output", ":", "# has the modules output become urgent?", "# we only care the update that this happens", "# not any after then.", "urgent", "=", "True", "in", "[", "x", ".", "get", "(", "\"urgent\"", ")", "for", "x", "in", "output", "]", "if", "urgent", "!=", "self", ".", "urgent", ":", "self", ".", "urgent", "=", "urgent", "else", ":", "urgent", "=", "False", "self", ".", "last_output", "=", "output", "self", ".", "_py3_wrapper", ".", "notify_update", "(", "self", ".", "module_full_name", ",", "urgent", ")" ]
Mark the module as updated. We check if the actual content has changed and if so we trigger an update in py3status.
[ "Mark", "the", "module", "as", "updated", ".", "We", "check", "if", "the", "actual", "content", "has", "changed", "and", "if", "so", "we", "trigger", "an", "update", "in", "py3status", "." ]
python
train
41.03125
DarkEnergySurvey/ugali
ugali/utils/plotting.py
https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L141-L146
def projScatter(lon, lat, **kwargs):
    """
    Create a scatter plot on HEALPix projected axes.

    Inputs: lon (deg), lat (deg)
    """
    hp.projscatter(lon, lat, lonlat=True, **kwargs)
[ "def", "projScatter", "(", "lon", ",", "lat", ",", "*", "*", "kwargs", ")", ":", "hp", ".", "projscatter", "(", "lon", ",", "lat", ",", "lonlat", "=", "True", ",", "*", "*", "kwargs", ")" ]
Create a scatter plot on HEALPix projected axes.

Inputs: lon (deg), lat (deg)
[ "Create", "a", "scatter", "plot", "on", "HEALPix", "projected", "axes", ".", "Inputs", ":", "lon", "(", "deg", ")", "lat", "(", "deg", ")" ]
python
train
30.833333
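A minimal sketch of calling the wrapper; it assumes a HEALPix projection has already been drawn (here via hp.mollview), since hp.projscatter plots onto the current projected axes.

import numpy as np
import healpy as hp
import matplotlib.pyplot as plt

hp.mollview(title='projScatter demo')            # creates the projected axes
lon = np.array([10.0, 45.0, 120.0])              # degrees
lat = np.array([-30.0, 0.0, 60.0])               # degrees
projScatter(lon, lat, marker='o', color='red')   # wraps hp.projscatter(lonlat=True)
plt.show()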
rytilahti/python-songpal
songpal/device.py
https://github.com/rytilahti/python-songpal/blob/0443de6b3d960b9067a851d82261ca00e46b4618/songpal/device.py#L304-L307
async def set_custom_eq(self, target: str, value: str) -> None:
    """Set custom EQ settings."""
    params = {"settings": [{"target": target, "value": value}]}
    return await self.services["audio"]["setCustomEqualizerSettings"](params)
[ "async", "def", "set_custom_eq", "(", "self", ",", "target", ":", "str", ",", "value", ":", "str", ")", "->", "None", ":", "params", "=", "{", "\"settings\"", ":", "[", "{", "\"target\"", ":", "target", ",", "\"value\"", ":", "value", "}", "]", "}", "return", "await", "self", ".", "services", "[", "\"audio\"", "]", "[", "\"setCustomEqualizerSettings\"", "]", "(", "params", ")" ]
Set custom EQ settings.
[ "Set", "custom", "EQ", "settings", "." ]
python
train
62
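A hedged usage sketch for the coroutine above; the endpoint, target name, and value are illustrative, and the device's service table is assumed to be populated by get_supported_methods() first.

import asyncio

from songpal import Device

async def main():
    dev = Device('http://192.168.1.50:10000/sony')      # illustrative endpoint
    await dev.get_supported_methods()                   # fills dev.services
    await dev.set_custom_eq(target='110HZ', value='6')  # hypothetical band/level

asyncio.run(main())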
wavycloud/pyboto3
pyboto3/mturk.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/mturk.py#L195-L442
def create_hit(MaxAssignments=None, AutoApprovalDelayInSeconds=None,
               LifetimeInSeconds=None, AssignmentDurationInSeconds=None,
               Reward=None, Title=None, Keywords=None, Description=None,
               Question=None, RequesterAnnotation=None,
               QualificationRequirements=None, UniqueRequestToken=None,
               AssignmentReviewPolicy=None, HITReviewPolicy=None,
               HITLayoutId=None, HITLayoutParameters=None):
    """
    The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
    This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT, a new HIT is created for you, with a new HITTypeID. The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
    An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
    CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters.
    See also: AWS API Documentation

    :example: response = client.create_hit(
        MaxAssignments=123,
        AutoApprovalDelayInSeconds=123,
        LifetimeInSeconds=123,
        AssignmentDurationInSeconds=123,
        Reward='string',
        Title='string',
        Keywords='string',
        Description='string',
        Question='string',
        RequesterAnnotation='string',
        QualificationRequirements=[
            {
                'QualificationTypeId': 'string',
                'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
                'IntegerValues': [
                    123,
                ],
                'LocaleValues': [
                    {
                        'Country': 'string',
                        'Subdivision': 'string'
                    },
                ],
                'RequiredToPreview': True|False
            },
        ],
        UniqueRequestToken='string',
        AssignmentReviewPolicy={
            'PolicyName': 'string',
            'Parameters': [
                {
                    'Key': 'string',
                    'Values': [
                        'string',
                    ],
                    'MapEntries': [
                        {
                            'Key': 'string',
                            'Values': [
                                'string',
                            ]
                        },
                    ]
                },
            ]
        },
        HITReviewPolicy={
            'PolicyName': 'string',
            'Parameters': [
                {
                    'Key': 'string',
                    'Values': [
                        'string',
                    ],
                    'MapEntries': [
                        {
                            'Key': 'string',
                            'Values': [
                                'string',
                            ]
                        },
                    ]
                },
            ]
        },
        HITLayoutId='string',
        HITLayoutParameters=[
            {
                'Name': 'string',
                'Value': 'string'
            },
        ]
    )

    :type MaxAssignments: integer
    :param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.

    :type AutoApprovalDelayInSeconds: integer
    :param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.

    :type LifetimeInSeconds: integer
    :param LifetimeInSeconds: [REQUIRED]
        An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.

    :type AssignmentDurationInSeconds: integer
    :param AssignmentDurationInSeconds: [REQUIRED]
        The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.

    :type Reward: string
    :param Reward: [REQUIRED]
        The amount of money the Requester will pay a Worker for successfully completing the HIT.

    :type Title: string
    :param Title: [REQUIRED]
        The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.

    :type Keywords: string
    :param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.

    :type Description: string
    :param Description: [REQUIRED]
        A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.

    :type Question: string
    :param Question: The data the person completing the HIT uses to produce the results.
        Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
        Either a Question parameter or a HITLayoutId parameter must be provided.

    :type RequesterAnnotation: string
    :param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
        The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
        The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.

    :type QualificationRequirements: list
    :param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
        (dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
            QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
            Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
            IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
                (integer) --
            LocaleValues (list) --The locale value to compare against the Qualification's value. The local value must be a valid ISO 3166 country code or supports ISO 3166-2 subdivisions. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
                (dict) --The Locale data structure represents a geographical region or location.
                    Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
                    Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
            RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.

    :type UniqueRequestToken: string
    :param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return a AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
        Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.

    :type AssignmentReviewPolicy: dict
    :param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
        PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
        Parameters (list) --Name of the parameter from the Review policy.
            (dict) --Name of the parameter from the Review policy.
                Key (string) --Name of the parameter from the list of Review Polices.
                Values (list) --The list of values of the Parameter
                    (string) --
                MapEntries (list) --List of ParameterMapEntry objects.
                    (dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
                        Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
                        Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
                            (string) --

    :type HITReviewPolicy: dict
    :param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
        PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
        Parameters (list) --Name of the parameter from the Review policy.
            (dict) --Name of the parameter from the Review policy.
                Key (string) --Name of the parameter from the list of Review Polices.
                Values (list) --The list of values of the Parameter
                    (string) --
                MapEntries (list) --List of ParameterMapEntry objects.
                    (dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
                        Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
                        Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
                            (string) --

    :type HITLayoutId: string
    :param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
        Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.

    :type HITLayoutParameters: list
    :param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
        (dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
            Name (string) --The name of the parameter in the HITLayout.
            Value (string) --The value substituted for the parameter referenced in the HITLayout.

    :rtype: dict
    :return: {
        'HIT': {
            'HITId': 'string',
            'HITTypeId': 'string',
            'HITGroupId': 'string',
            'HITLayoutId': 'string',
            'CreationTime': datetime(2015, 1, 1),
            'Title': 'string',
            'Description': 'string',
            'Question': 'string',
            'Keywords': 'string',
            'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
            'MaxAssignments': 123,
            'Reward': 'string',
            'AutoApprovalDelayInSeconds': 123,
            'Expiration': datetime(2015, 1, 1),
            'AssignmentDurationInSeconds': 123,
            'RequesterAnnotation': 'string',
            'QualificationRequirements': [
                {
                    'QualificationTypeId': 'string',
                    'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
                    'IntegerValues': [
                        123,
                    ],
                    'LocaleValues': [
                        {
                            'Country': 'string',
                            'Subdivision': 'string'
                        },
                    ],
                    'RequiredToPreview': True|False
                },
            ],
            'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
            'NumberOfAssignmentsPending': 123,
            'NumberOfAssignmentsAvailable': 123,
            'NumberOfAssignmentsCompleted': 123
        }
    }

    :returns:
        (integer) --
    """
    pass
[ "def", "create_hit", "(", "MaxAssignments", "=", "None", ",", "AutoApprovalDelayInSeconds", "=", "None", ",", "LifetimeInSeconds", "=", "None", ",", "AssignmentDurationInSeconds", "=", "None", ",", "Reward", "=", "None", ",", "Title", "=", "None", ",", "Keywords", "=", "None", ",", "Description", "=", "None", ",", "Question", "=", "None", ",", "RequesterAnnotation", "=", "None", ",", "QualificationRequirements", "=", "None", ",", "UniqueRequestToken", "=", "None", ",", "AssignmentReviewPolicy", "=", "None", ",", "HITReviewPolicy", "=", "None", ",", "HITLayoutId", "=", "None", ",", "HITLayoutParameters", "=", "None", ")", ":", "pass" ]
The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT, a new HIT is created for you, with a new HITTypeID. The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters.
See also: AWS API Documentation

:example: response = client.create_hit(
    MaxAssignments=123,
    AutoApprovalDelayInSeconds=123,
    LifetimeInSeconds=123,
    AssignmentDurationInSeconds=123,
    Reward='string',
    Title='string',
    Keywords='string',
    Description='string',
    Question='string',
    RequesterAnnotation='string',
    QualificationRequirements=[
        {
            'QualificationTypeId': 'string',
            'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
            'IntegerValues': [
                123,
            ],
            'LocaleValues': [
                {
                    'Country': 'string',
                    'Subdivision': 'string'
                },
            ],
            'RequiredToPreview': True|False
        },
    ],
    UniqueRequestToken='string',
    AssignmentReviewPolicy={
        'PolicyName': 'string',
        'Parameters': [
            {
                'Key': 'string',
                'Values': [
                    'string',
                ],
                'MapEntries': [
                    {
                        'Key': 'string',
                        'Values': [
                            'string',
                        ]
                    },
                ]
            },
        ]
    },
    HITReviewPolicy={
        'PolicyName': 'string',
        'Parameters': [
            {
                'Key': 'string',
                'Values': [
                    'string',
                ],
                'MapEntries': [
                    {
                        'Key': 'string',
                        'Values': [
                            'string',
                        ]
                    },
                ]
            },
        ]
    },
    HITLayoutId='string',
    HITLayoutParameters=[
        {
            'Name': 'string',
            'Value': 'string'
        },
    ]
)

:type MaxAssignments: integer
:param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.

:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.

:type LifetimeInSeconds: integer
:param LifetimeInSeconds: [REQUIRED]
    An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.

:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: [REQUIRED]
    The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.

:type Reward: string
:param Reward: [REQUIRED]
    The amount of money the Requester will pay a Worker for successfully completing the HIT.

:type Title: string
:param Title: [REQUIRED]
    The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.

:type Keywords: string
:param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.

:type Description: string
:param Description: [REQUIRED]
    A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.

:type Question: string
:param Question: The data the person completing the HIT uses to produce the results.
    Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
    Either a Question parameter or a HITLayoutId parameter must be provided.

:type RequesterAnnotation: string
:param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
    The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
    The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.

:type QualificationRequirements: list
:param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
    (dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
        QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
        Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
        IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
            (integer) --
        LocaleValues (list) --The locale value to compare against the Qualification's value. The local value must be a valid ISO 3166 country code or supports ISO 3166-2 subdivisions. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
            (dict) --The Locale data structure represents a geographical region or location.
                Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
                Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
        RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.

:type UniqueRequestToken: string
:param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return a AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
    Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.

:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
    PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
    Parameters (list) --Name of the parameter from the Review policy.
        (dict) --Name of the parameter from the Review policy.
            Key (string) --Name of the parameter from the list of Review Polices.
            Values (list) --The list of values of the Parameter
                (string) --
            MapEntries (list) --List of ParameterMapEntry objects.
                (dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
                    Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
                    Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
                        (string) --

:type HITReviewPolicy: dict
:param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
    PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
    Parameters (list) --Name of the parameter from the Review policy.
        (dict) --Name of the parameter from the Review policy.
            Key (string) --Name of the parameter from the list of Review Polices.
            Values (list) --The list of values of the Parameter
                (string) --
            MapEntries (list) --List of ParameterMapEntry objects.
                (dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
                    Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
                    Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
                        (string) --

:type HITLayoutId: string
:param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
    Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.

:type HITLayoutParameters: list
:param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
    (dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
        Name (string) --The name of the parameter in the HITLayout.
        Value (string) --The value substituted for the parameter referenced in the HITLayout.

:rtype: dict
:return: {
    'HIT': {
        'HITId': 'string',
        'HITTypeId': 'string',
        'HITGroupId': 'string',
        'HITLayoutId': 'string',
        'CreationTime': datetime(2015, 1, 1),
        'Title': 'string',
        'Description': 'string',
        'Question': 'string',
        'Keywords': 'string',
        'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
        'MaxAssignments': 123,
        'Reward': 'string',
        'AutoApprovalDelayInSeconds': 123,
        'Expiration': datetime(2015, 1, 1),
        'AssignmentDurationInSeconds': 123,
        'RequesterAnnotation': 'string',
        'QualificationRequirements': [
            {
                'QualificationTypeId': 'string',
                'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
                'IntegerValues': [
                    123,
                ],
                'LocaleValues': [
                    {
                        'Country': 'string',
                        'Subdivision': 'string'
                    },
                ],
                'RequiredToPreview': True|False
            },
        ],
        'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
        'NumberOfAssignmentsPending': 123,
        'NumberOfAssignmentsAvailable': 123,
        'NumberOfAssignmentsCompleted': 123
    }
}

:returns:
    (integer) --
[ "The", "CreateHIT", "operation", "creates", "a", "new", "Human", "Intelligence", "Task", "(", "HIT", ")", ".", "The", "new", "HIT", "is", "made", "available", "for", "Workers", "to", "find", "and", "accept", "on", "the", "Amazon", "Mechanical", "Turk", "website", ".", "This", "operation", "allows", "you", "to", "specify", "a", "new", "HIT", "by", "passing", "in", "values", "for", "the", "properties", "of", "the", "HIT", "such", "as", "its", "title", "reward", "amount", "and", "number", "of", "assignments", ".", "When", "you", "pass", "these", "values", "to", "CreateHIT", "a", "new", "HIT", "is", "created", "for", "you", "with", "a", "new", "HITTypeID", ".", "The", "HITTypeID", "can", "be", "used", "to", "create", "additional", "HITs", "in", "the", "future", "without", "needing", "to", "specify", "common", "parameters", "such", "as", "the", "title", "description", "and", "reward", "amount", "each", "time", ".", "An", "alternative", "way", "to", "create", "HITs", "is", "to", "first", "generate", "a", "HITTypeID", "using", "the", "CreateHITType", "operation", "and", "then", "call", "the", "CreateHITWithHITType", "operation", ".", "This", "is", "the", "recommended", "best", "practice", "for", "Requesters", "who", "are", "creating", "large", "numbers", "of", "HITs", ".", "CreateHIT", "also", "supports", "several", "ways", "to", "provide", "question", "data", ":", "by", "providing", "a", "value", "for", "the", "Question", "parameter", "that", "fully", "specifies", "the", "contents", "of", "the", "HIT", "or", "by", "providing", "a", "HitLayoutId", "and", "associated", "HitLayoutParameters", ".", "See", "also", ":", "AWS", "API", "Documentation", ":", "example", ":", "response", "=", "client", ".", "create_hit", "(", "MaxAssignments", "=", "123", "AutoApprovalDelayInSeconds", "=", "123", "LifetimeInSeconds", "=", "123", "AssignmentDurationInSeconds", "=", "123", "Reward", "=", "string", "Title", "=", "string", "Keywords", "=", "string", "Description", "=", "string", "Question", "=", "string", "RequesterAnnotation", "=", "string", "QualificationRequirements", "=", "[", "{", "QualificationTypeId", ":", "string", "Comparator", ":", "LessThan", "|", "LessThanOrEqualTo", "|", "GreaterThan", "|", "GreaterThanOrEqualTo", "|", "EqualTo", "|", "NotEqualTo", "|", "Exists", "|", "DoesNotExist", "|", "In", "|", "NotIn", "IntegerValues", ":", "[", "123", "]", "LocaleValues", ":", "[", "{", "Country", ":", "string", "Subdivision", ":", "string", "}", "]", "RequiredToPreview", ":", "True|False", "}", "]", "UniqueRequestToken", "=", "string", "AssignmentReviewPolicy", "=", "{", "PolicyName", ":", "string", "Parameters", ":", "[", "{", "Key", ":", "string", "Values", ":", "[", "string", "]", "MapEntries", ":", "[", "{", "Key", ":", "string", "Values", ":", "[", "string", "]", "}", "]", "}", "]", "}", "HITReviewPolicy", "=", "{", "PolicyName", ":", "string", "Parameters", ":", "[", "{", "Key", ":", "string", "Values", ":", "[", "string", "]", "MapEntries", ":", "[", "{", "Key", ":", "string", "Values", ":", "[", "string", "]", "}", "]", "}", "]", "}", "HITLayoutId", "=", "string", "HITLayoutParameters", "=", "[", "{", "Name", ":", "string", "Value", ":", "string", "}", "]", ")", ":", "type", "MaxAssignments", ":", "integer", ":", "param", "MaxAssignments", ":", "The", "number", "of", "times", "the", "HIT", "can", "be", "accepted", "and", "completed", "before", "the", "HIT", "becomes", "unavailable", "." ]
python
train
66.282258
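pyboto3 bodies are empty stubs that exist only for editor autocompletion; the operation itself runs through a boto3 client. A minimal sketch against the MTurk sandbox, with an illustrative question payload:

import boto3

client = boto3.client(
    'mturk',
    endpoint_url='https://mturk-requester-sandbox.us-east-1.amazonaws.com',
)
with open('question.xml') as f:  # an HTMLQuestion/ExternalQuestion document
    question_xml = f.read()
response = client.create_hit(
    MaxAssignments=3,
    LifetimeInSeconds=3600,
    AssignmentDurationInSeconds=600,
    Reward='0.05',
    Title='Categorize an image',
    Description='Pick the best category for a single image.',
    Question=question_xml,
)
print(response['HIT']['HITId'])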
jeffknupp/sandman2
sandman2/app.py
https://github.com/jeffknupp/sandman2/blob/1ce21d6f7a6df77fa96fab694b0f9bb8469c166b/sandman2/app.py#L95-L123
def register_service(cls, primary_key_type):
    """Register an API service endpoint.

    :param cls: The class to register
    :param str primary_key_type: The type (as a string) of the primary_key field
    """
    view_func = cls.as_view(cls.__name__.lower())  # pylint: disable=no-member
    methods = set(cls.__model__.__methods__)  # pylint: disable=no-member

    if 'GET' in methods:  # pylint: disable=no-member
        current_app.add_url_rule(
            cls.__model__.__url__ + '/', defaults={'resource_id': None},
            view_func=view_func,
            methods=['GET'])
        current_app.add_url_rule(
            '{resource}/meta'.format(resource=cls.__model__.__url__),
            view_func=view_func,
            methods=['GET'])
    if 'POST' in methods:  # pylint: disable=no-member
        current_app.add_url_rule(
            cls.__model__.__url__ + '/', view_func=view_func, methods=['POST', ])
    current_app.add_url_rule(
        '{resource}/<{pk_type}:{pk}>'.format(
            resource=cls.__model__.__url__,
            pk='resource_id',
            pk_type=primary_key_type),
        view_func=view_func,
        methods=methods - {'POST'})
    current_app.classes.append(cls)
[ "def", "register_service", "(", "cls", ",", "primary_key_type", ")", ":", "view_func", "=", "cls", ".", "as_view", "(", "cls", ".", "__name__", ".", "lower", "(", ")", ")", "# pylint: disable=no-member", "methods", "=", "set", "(", "cls", ".", "__model__", ".", "__methods__", ")", "# pylint: disable=no-member", "if", "'GET'", "in", "methods", ":", "# pylint: disable=no-member", "current_app", ".", "add_url_rule", "(", "cls", ".", "__model__", ".", "__url__", "+", "'/'", ",", "defaults", "=", "{", "'resource_id'", ":", "None", "}", ",", "view_func", "=", "view_func", ",", "methods", "=", "[", "'GET'", "]", ")", "current_app", ".", "add_url_rule", "(", "'{resource}/meta'", ".", "format", "(", "resource", "=", "cls", ".", "__model__", ".", "__url__", ")", ",", "view_func", "=", "view_func", ",", "methods", "=", "[", "'GET'", "]", ")", "if", "'POST'", "in", "methods", ":", "# pylint: disable=no-member", "current_app", ".", "add_url_rule", "(", "cls", ".", "__model__", ".", "__url__", "+", "'/'", ",", "view_func", "=", "view_func", ",", "methods", "=", "[", "'POST'", ",", "]", ")", "current_app", ".", "add_url_rule", "(", "'{resource}/<{pk_type}:{pk}>'", ".", "format", "(", "resource", "=", "cls", ".", "__model__", ".", "__url__", ",", "pk", "=", "'resource_id'", ",", "pk_type", "=", "primary_key_type", ")", ",", "view_func", "=", "view_func", ",", "methods", "=", "methods", "-", "{", "'POST'", "}", ")", "current_app", ".", "classes", ".", "append", "(", "cls", ")" ]
Register an API service endpoint.

:param cls: The class to register
:param str primary_key_type: The type (as a string) of the primary_key field
[ "Register", "an", "API", "service", "endpoint", "." ]
python
train
41.724138
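register_service is called internally by sandman2 when it reflects database tables; end users normally reach it through get_app. A minimal sketch with an illustrative database URI:

from sandman2 import get_app

# Each reflected model gets its routes registered via register_service(),
# including the /<resource>/meta endpoint.
app = get_app('sqlite+pysqlite:///chinook.db')
app.run(port=5000)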
wal-e/wal-e
wal_e/tar_partition.py
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/tar_partition.py#L187-L228
def cat_extract(tar, member, targetpath):
    """Extract a regular file member using cat for async-like I/O

    Mostly adapted from tarfile.py.
    """
    assert member.isreg()

    # Fetch the TarInfo object for the given name and build the
    # destination pathname, replacing forward slashes to platform
    # specific separators.
    targetpath = targetpath.rstrip("/")
    targetpath = targetpath.replace("/", os.sep)

    # Create all upper directories.
    upperdirs = os.path.dirname(targetpath)
    if upperdirs and not os.path.exists(upperdirs):
        try:
            # Create directories that are not part of the archive with
            # default permissions.
            os.makedirs(upperdirs)
        except EnvironmentError as e:
            if e.errno == errno.EEXIST:
                # Ignore an error caused by the race of
                # the directory being created between the
                # check for the path and the creation.
                pass
            else:
                raise

    with files.DeleteOnError(targetpath) as dest:
        with pipeline.get_cat_pipeline(pipeline.PIPE, dest.f) as pl:
            fp = tar.extractfile(member)
            copyfileobj.copyfileobj(fp, pl.stdin)

    if sys.version_info < (3, 5):
        tar.chown(member, targetpath)
    else:
        tar.chown(member, targetpath, False)
    tar.chmod(member, targetpath)
    tar.utime(member, targetpath)
[ "def", "cat_extract", "(", "tar", ",", "member", ",", "targetpath", ")", ":", "assert", "member", ".", "isreg", "(", ")", "# Fetch the TarInfo object for the given name and build the", "# destination pathname, replacing forward slashes to platform", "# specific separators.", "targetpath", "=", "targetpath", ".", "rstrip", "(", "\"/\"", ")", "targetpath", "=", "targetpath", ".", "replace", "(", "\"/\"", ",", "os", ".", "sep", ")", "# Create all upper directories.", "upperdirs", "=", "os", ".", "path", ".", "dirname", "(", "targetpath", ")", "if", "upperdirs", "and", "not", "os", ".", "path", ".", "exists", "(", "upperdirs", ")", ":", "try", ":", "# Create directories that are not part of the archive with", "# default permissions.", "os", ".", "makedirs", "(", "upperdirs", ")", "except", "EnvironmentError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EEXIST", ":", "# Ignore an error caused by the race of", "# the directory being created between the", "# check for the path and the creation.", "pass", "else", ":", "raise", "with", "files", ".", "DeleteOnError", "(", "targetpath", ")", "as", "dest", ":", "with", "pipeline", ".", "get_cat_pipeline", "(", "pipeline", ".", "PIPE", ",", "dest", ".", "f", ")", "as", "pl", ":", "fp", "=", "tar", ".", "extractfile", "(", "member", ")", "copyfileobj", ".", "copyfileobj", "(", "fp", ",", "pl", ".", "stdin", ")", "if", "sys", ".", "version_info", "<", "(", "3", ",", "5", ")", ":", "tar", ".", "chown", "(", "member", ",", "targetpath", ")", "else", ":", "tar", ".", "chown", "(", "member", ",", "targetpath", ",", "False", ")", "tar", ".", "chmod", "(", "member", ",", "targetpath", ")", "tar", ".", "utime", "(", "member", ",", "targetpath", ")" ]
Extract a regular file member using cat for async-like I/O

Mostly adapted from tarfile.py.
[ "Extract", "a", "regular", "file", "member", "using", "cat", "for", "async", "-", "like", "I", "/", "O" ]
python
train
33.047619
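A hypothetical call site: walk a tar archive and route each regular-file member through the cat pipeline; the archive and destination paths are illustrative.

import os
import tarfile

with tarfile.open('base_backup.tar') as tar:
    for member in tar:
        if member.isreg():  # cat_extract asserts this
            cat_extract(tar, member,
                        os.path.join('/srv/restore', member.name))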
BeyondTheClouds/enoslib
enoslib/api.py
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/api.py#L966-L977
def _merge_constraints(constraints, overrides):
    """Merge the constraints avoiding duplicates

    Change constraints in place.
    """
    for o in overrides:
        i = 0
        while i < len(constraints):
            c = constraints[i]
            if _same(o, c):
                constraints[i].update(o)
                break
            i = i + 1
[ "def", "_merge_constraints", "(", "constraints", ",", "overrides", ")", ":", "for", "o", "in", "overrides", ":", "i", "=", "0", "while", "i", "<", "len", "(", "constraints", ")", ":", "c", "=", "constraints", "[", "i", "]", "if", "_same", "(", "o", ",", "c", ")", ":", "constraints", "[", "i", "]", ".", "update", "(", "o", ")", "break", "i", "=", "i", "+", "1" ]
Merge the constraints avoiding duplicates

Change constraints in place.
[ "Merge", "the", "constraints", "avoiding", "duplicates", "Change", "constraints", "in", "place", "." ]
python
train
28.666667
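_same is defined elsewhere in enoslib.api; assuming it matches constraints on their src/dst endpoints, the in-place merge behaves like this:

# Stand-in for enoslib.api._same, assuming constraints are keyed on src/dst.
def _same(a, b):
    return a['src'] == b['src'] and a['dst'] == b['dst']

constraints = [{'src': 'n1', 'dst': 'n2', 'delay': '10ms'}]
overrides = [{'src': 'n1', 'dst': 'n2', 'delay': '50ms', 'loss': '1%'}]
_merge_constraints(constraints, overrides)
print(constraints)
# [{'src': 'n1', 'dst': 'n2', 'delay': '50ms', 'loss': '1%'}]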
Cadene/pretrained-models.pytorch
pretrainedmodels/models/torchvision_models.py
https://github.com/Cadene/pretrained-models.pytorch/blob/021d97897c9aa76ec759deff43d341c4fd45d7ba/pretrainedmodels/models/torchvision_models.py#L205-L214
def densenet121(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    model = models.densenet121(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['densenet121'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_densenets(model)
    return model
[ "def", "densenet121", "(", "num_classes", "=", "1000", ",", "pretrained", "=", "'imagenet'", ")", ":", "model", "=", "models", ".", "densenet121", "(", "pretrained", "=", "False", ")", "if", "pretrained", "is", "not", "None", ":", "settings", "=", "pretrained_settings", "[", "'densenet121'", "]", "[", "pretrained", "]", "model", "=", "load_pretrained", "(", "model", ",", "num_classes", ",", "settings", ")", "model", "=", "modify_densenets", "(", "model", ")", "return", "model" ]
Densenet-121 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
[ "r", "Densenet", "-", "121", "model", "from", "Densely", "Connected", "Convolutional", "Networks", "<https", ":", "//", "arxiv", ".", "org", "/", "pdf", "/", "1608", ".", "06993", ".", "pdf", ">" ]
python
train
43.7
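Usage follows the pretrainedmodels convention of looking the constructor up by name; the dummy input below matches the 224x224 ImageNet resolution these settings expect.

import torch
import pretrainedmodels

model = pretrainedmodels.__dict__['densenet121'](num_classes=1000, pretrained='imagenet')
model.eval()
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000])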
Yelp/kafka-utils
kafka_utils/kafka_cluster_manager/cluster_info/broker.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_cluster_manager/cluster_info/broker.py#L152-L174
def get_preferred_partition(self, broker, sibling_distance):
    """The preferred partition belongs to the topic with the minimum
    (also negative) distance between destination and source.

    :param broker: Destination broker
    :param sibling_distance: dict {topic: distance} negative distance should
        mean that destination broker has got less partition of a certain
        topic than source self.
    :returns: A partition or None if no eligible partitions are available
    """
    # Only partitions not having replica in broker are valid
    # Get best fit partition, based on avoiding partition from same topic
    # and partition with least siblings in destination-broker.
    eligible_partitions = self.partitions - broker.partitions
    if eligible_partitions:
        pref_partition = min(
            eligible_partitions,
            key=lambda source_partition: sibling_distance[source_partition.topic],
        )
        return pref_partition
    else:
        return None
[ "def", "get_preferred_partition", "(", "self", ",", "broker", ",", "sibling_distance", ")", ":", "# Only partitions not having replica in broker are valid", "# Get best fit partition, based on avoiding partition from same topic", "# and partition with least siblings in destination-broker.", "eligible_partitions", "=", "self", ".", "partitions", "-", "broker", ".", "partitions", "if", "eligible_partitions", ":", "pref_partition", "=", "min", "(", "eligible_partitions", ",", "key", "=", "lambda", "source_partition", ":", "sibling_distance", "[", "source_partition", ".", "topic", "]", ",", ")", "return", "pref_partition", "else", ":", "return", "None" ]
The preferred partition belongs to the topic with the minimum (also negative) distance between destination and source.

:param broker: Destination broker
:param sibling_distance: dict {topic: distance} negative distance should mean
    that destination broker has got less partition of a certain topic than
    source self.
:returns: A partition or None if no eligible partitions are available
[ "The", "preferred", "partition", "belongs", "to", "the", "topic", "with", "the", "minimum", "(", "also", "negative", ")", "distance", "between", "destination", "and", "source", "." ]
python
train
46.782609
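The selection rule above is easy to check standalone. A minimal sketch with hypothetical topic/partition names (the real method operates on Partition objects, not strings):

# Pick, from partitions the destination lacks, the one whose topic has the
# smallest (possibly negative) sibling distance.
source_partitions = {'t1-0', 't1-1', 't2-0'}
dest_partitions = {'t1-1'}
sibling_distance = {'t1': 1, 't2': -2}  # negative: destination holds fewer of this topic
eligible = source_partitions - dest_partitions
topic = lambda p: p.rsplit('-', 1)[0]
preferred = min(eligible, key=lambda p: sibling_distance[topic(p)]) if eligible else None
print(preferred)  # 't2-0'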
OnroerendErfgoed/oe_utils
oe_utils/audit.py
https://github.com/OnroerendErfgoed/oe_utils/blob/7b2014bda8ac6bb71b7138eaa06ac17ef3ff4a6d/oe_utils/audit.py#L40-L94
def audit(**kwargs): """ use this decorator to audit an operation """ def wrap(fn): @functools.wraps(fn) def advice(parent_object, *args, **kw): request = parent_object.request wijziging = request.audit_manager.create_revision() result = fn(parent_object, *args, **kw) if hasattr(request, 'user') and request.user is not None and 'actor' in request.user: actor = request.user['actor'] attributes = request.user['attributes'] wijziging.updated_by = actor.get('uri', None) if actor.get('uri') == actor.get('instantie_actor_uri'): wijziging.updated_by_omschrijving = ( attributes.get('displayname') or attributes.get('mail') or actor.get('omschrijving')) else: wijziging.updated_by_omschrijving = actor.get( 'omschrijving') else: wijziging.updated_by = 'publiek' wijziging.updated_by_omschrijving = 'publiek' r_id = request.matchdict.get('id') wijziging.resource_object_id = r_id if result is not None: try: renderer_name = request.registry.settings.get( 'audit.pyramid.json.renderer', 'jsonrenderer') json_string = renderers.render(renderer_name, result, request=request) result_object_json = json.loads(json_string) wijziging.resource_object_json = result_object_json wijziging.resource_object_id = _get_id_from_result(r_id, result_object_json, kwargs) except Exception as e: log.error(e) wijziging.versie = _get_versie_hash(wijziging) wijziging.actie = kwargs.get('actie') if kwargs.get('actie') else _action_from_request(request) request.audit_manager.save(wijziging) return result return advice return wrap
[ "def", "audit", "(", "*", "*", "kwargs", ")", ":", "def", "wrap", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "advice", "(", "parent_object", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "request", "=", "parent_object", ".", "request", "wijziging", "=", "request", ".", "audit_manager", ".", "create_revision", "(", ")", "result", "=", "fn", "(", "parent_object", ",", "*", "args", ",", "*", "*", "kw", ")", "if", "hasattr", "(", "request", ",", "'user'", ")", "and", "request", ".", "user", "is", "not", "None", "and", "'actor'", "in", "request", ".", "user", ":", "actor", "=", "request", ".", "user", "[", "'actor'", "]", "attributes", "=", "request", ".", "user", "[", "'attributes'", "]", "wijziging", ".", "updated_by", "=", "actor", ".", "get", "(", "'uri'", ",", "None", ")", "if", "actor", ".", "get", "(", "'uri'", ")", "==", "actor", ".", "get", "(", "'instantie_actor_uri'", ")", ":", "wijziging", ".", "updated_by_omschrijving", "=", "(", "attributes", ".", "get", "(", "'displayname'", ")", "or", "attributes", ".", "get", "(", "'mail'", ")", "or", "actor", ".", "get", "(", "'omschrijving'", ")", ")", "else", ":", "wijziging", ".", "updated_by_omschrijving", "=", "actor", ".", "get", "(", "'omschrijving'", ")", "else", ":", "wijziging", ".", "updated_by", "=", "'publiek'", "wijziging", ".", "updated_by_omschrijving", "=", "'publiek'", "r_id", "=", "request", ".", "matchdict", ".", "get", "(", "'id'", ")", "wijziging", ".", "resource_object_id", "=", "r_id", "if", "result", "is", "not", "None", ":", "try", ":", "renderer_name", "=", "request", ".", "registry", ".", "settings", ".", "get", "(", "'audit.pyramid.json.renderer'", ",", "'jsonrenderer'", ")", "json_string", "=", "renderers", ".", "render", "(", "renderer_name", ",", "result", ",", "request", "=", "request", ")", "result_object_json", "=", "json", ".", "loads", "(", "json_string", ")", "wijziging", ".", "resource_object_json", "=", "result_object_json", "wijziging", ".", "resource_object_id", "=", "_get_id_from_result", "(", "r_id", ",", "result_object_json", ",", "kwargs", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "e", ")", "wijziging", ".", "versie", "=", "_get_versie_hash", "(", "wijziging", ")", "wijziging", ".", "actie", "=", "kwargs", ".", "get", "(", "'actie'", ")", "if", "kwargs", ".", "get", "(", "'actie'", ")", "else", "_action_from_request", "(", "request", ")", "request", ".", "audit_manager", ".", "save", "(", "wijziging", ")", "return", "result", "return", "advice", "return", "wrap" ]
use this decorator to audit an operation
[ "use", "this", "decorator", "to", "audit", "an", "operation" ]
python
train
39.109091
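A hedged sketch of how a Pyramid view method might opt in to this decorator; the view class, its request wiring (audit_manager, matchdict) and the 'actie' value are illustrative assumptions, not part of the library:

class AdresView(object):
    def __init__(self, request):
        self.request = request  # request.audit_manager is assumed to be configured

    @audit(actie='update')
    def update_adres(self):
        return {'id': self.request.matchdict.get('id'), 'status': 'updated'}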
konstantinstadler/pymrio
pymrio/core/mriosystem.py
https://github.com/konstantinstadler/pymrio/blob/d764aa0dd2150200e867a9713a98ddae203e12d4/pymrio/core/mriosystem.py#L1667-L1684
def reset_all_to_coefficients(self): """ Resets the IOSystem and all extensions to coefficients. This method calls reset_to_coefficients for the IOSystem and for all Extensions in the system Note ----- The system can not be reconstructed after this steps because all absolute data is removed. Save the Y data in case a reconstruction might be necessary. """ self.reset_to_coefficients() [ee.reset_to_coefficients() for ee in self.get_extensions(data=True)] self.meta._add_modify("Reset full system to coefficients") return self
[ "def", "reset_all_to_coefficients", "(", "self", ")", ":", "self", ".", "reset_to_coefficients", "(", ")", "[", "ee", ".", "reset_to_coefficients", "(", ")", "for", "ee", "in", "self", ".", "get_extensions", "(", "data", "=", "True", ")", "]", "self", ".", "meta", ".", "_add_modify", "(", "\"Reset full system to coefficients\"", ")", "return", "self" ]
Resets the IOSystem and all extensions to coefficients. This method calls reset_to_coefficients for the IOSystem and for all Extensions in the system Note ----- The system can not be reconstructed after this steps because all absolute data is removed. Save the Y data in case a reconstruction might be necessary.
[ "Resets", "the", "IOSystem", "and", "all", "extensions", "to", "coefficients", "." ]
python
train
34.333333
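A minimal sketch using pymrio's bundled small test system (assuming pymrio is installed):

import pymrio

io = pymrio.load_test()
io.calc_all()                    # populate coefficients and absolute accounts
Y = io.Y.copy()                  # per the note above, keep Y if reconstruction is needed
io.reset_all_to_coefficients()   # absolute data removed; coefficients remain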
sibirrer/lenstronomy
lenstronomy/LensModel/Profiles/dipole.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/LensModel/Profiles/dipole.py#L102-L108
def angle(self, center1_x, center1_y, center2_x, center2_y):
        """
        compute the rotation angle of the dipole

        :return: rotation angle phi_G in radians
        """
        phi_G = np.arctan2(center2_y - center1_y, center2_x - center1_x)
        return phi_G
[ "def", "angle", "(", "self", ",", "center1_x", ",", "center1_y", ",", "center2_x", ",", "center2_y", ")", ":", "phi_G", "=", "np", ".", "arctan2", "(", "center2_y", "-", "center1_y", ",", "center2_x", "-", "center1_x", ")", "return", "phi_G" ]
compute the rotation angle of the dipole

:return: rotation angle phi_G in radians
[ "compute", "the", "rotation", "angle", "of", "the", "dipole", ":", "return", ":" ]
python
train
34
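A quick numeric check of the formula; centers one unit apart along the y-axis should give pi/2:

import numpy as np

phi_G = np.arctan2(1.0 - 0.0, 0.0 - 0.0)  # (dy, dx) between the two centers
print(phi_G)  # 1.5707963267948966 radians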
meejah/txtorcon
txtorcon/circuit.py
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/circuit.py#L288-L326
def stream_via(self, reactor, host, port, socks_endpoint, use_tls=False):
        """
        This returns an `IStreamClientEndpoint`_ that will connect to
        the given ``host``, ``port`` via Tor -- and via this particular
        circuit.

        We match the streams up using their source-ports, so even if
        there are many streams in-flight to the same destination they
        will align correctly. For example, to cause a stream to go to
        ``torproject.org:443`` via a particular circuit::

            @inlineCallbacks
            def main(reactor):
                circ = yield torstate.build_circuit()  # lets Tor decide the path
                yield circ.when_built()
                tor_ep = circ.stream_via(reactor, 'torproject.org', 443)
                # 'factory' is for your protocol
                proto = yield tor_ep.connect(factory)

        Note that if you're doing client-side Web requests, you
        probably want to use
        `treq <http://treq.readthedocs.org/en/latest/>`_ or ``Agent`` directly
        so call :meth:`txtorcon.Circuit.web_agent` instead.

        :param socks_endpoint: should be a Deferred firing a valid
            IStreamClientEndpoint pointing at a Tor SOCKS port (or an
            IStreamClientEndpoint already).

        .. _istreamclientendpoint: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamClientEndpoint.html
        """
        from .endpoints import TorClientEndpoint
        ep = TorClientEndpoint(
            host, port, socks_endpoint,
            tls=use_tls,
            reactor=reactor,
        )
        return TorCircuitEndpoint(reactor, self._torstate, self, ep)
[ "def", "stream_via", "(", "self", ",", "reactor", ",", "host", ",", "port", ",", "socks_endpoint", ",", "use_tls", "=", "False", ")", ":", "from", ".", "endpoints", "import", "TorClientEndpoint", "ep", "=", "TorClientEndpoint", "(", "host", ",", "port", ",", "socks_endpoint", ",", "tls", "=", "use_tls", ",", "reactor", "=", "reactor", ",", ")", "return", "TorCircuitEndpoint", "(", "reactor", ",", "self", ".", "_torstate", ",", "self", ",", "ep", ")" ]
This returns an `IStreamClientEndpoint`_ that will connect to the given ``host``, ``port`` via Tor -- and via this particular circuit. We match the streams up using their source-ports, so even if there are many streams in-flight to the same destination they will align correctly. For example, to cause a stream to go to ``torproject.org:443`` via a particular circuit:: @inlineCallbacks def main(reactor): circ = yield torstate.build_circuit() # lets Tor decide the path yield circ.when_built() tor_ep = circ.stream_via(reactor, 'torproject.org', 443) # 'factory' is for your protocol proto = yield tor_ep.connect(factory) Note that if you're doing client-side Web requests, you probably want to use `treq <http://treq.readthedocs.org/en/latest/>`_ or ``Agent`` directly so call :meth:`txtorcon.Circuit.web_agent` instead. :param socks_endpoint: should be a Deferred firing a valid IStreamClientEndpoint pointing at a Tor SOCKS port (or an IStreamClientEndpoint already). .. _istreamclientendpoint: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamClientEndpoint.html
[ "This", "returns", "an", "IStreamClientEndpoint", "_", "that", "will", "connect", "to", "the", "given", "host", "port", "via", "Tor", "--", "and", "via", "this", "parciular", "circuit", "." ]
python
train
43.333333
python-useful-helpers/advanced-descriptors
advanced_descriptors/log_on_access.py
https://github.com/python-useful-helpers/advanced-descriptors/blob/17ee4a35b3bfcb4adf4ed2f41e75c4c6b71cb003/advanced_descriptors/log_on_access.py#L202-L216
def _get_logger_for_instance(self, instance: typing.Any) -> logging.Logger: """Get logger for log calls. :param instance: Owner class instance. Filled only if instance created, else None. :type instance: typing.Optional[owner] :return: logger instance :rtype: logging.Logger """ if self.logger is not None: # pylint: disable=no-else-return return self.logger elif hasattr(instance, "logger") and isinstance(instance.logger, logging.Logger): return instance.logger elif hasattr(instance, "log") and isinstance(instance.log, logging.Logger): return instance.log return _LOGGER
[ "def", "_get_logger_for_instance", "(", "self", ",", "instance", ":", "typing", ".", "Any", ")", "->", "logging", ".", "Logger", ":", "if", "self", ".", "logger", "is", "not", "None", ":", "# pylint: disable=no-else-return", "return", "self", ".", "logger", "elif", "hasattr", "(", "instance", ",", "\"logger\"", ")", "and", "isinstance", "(", "instance", ".", "logger", ",", "logging", ".", "Logger", ")", ":", "return", "instance", ".", "logger", "elif", "hasattr", "(", "instance", ",", "\"log\"", ")", "and", "isinstance", "(", "instance", ".", "log", ",", "logging", ".", "Logger", ")", ":", "return", "instance", ".", "log", "return", "_LOGGER" ]
Get logger for log calls. :param instance: Owner class instance. Filled only if instance created, else None. :type instance: typing.Optional[owner] :return: logger instance :rtype: logging.Logger
[ "Get", "logger", "for", "log", "calls", "." ]
python
test
45.2
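The resolution order is simple to mirror outside the descriptor. A standalone sketch (not the library code itself):

import logging

def resolve_logger(explicit, instance):
    # explicit logger wins, then instance.logger, then instance.log, then a fallback
    if explicit is not None:
        return explicit
    if isinstance(getattr(instance, 'logger', None), logging.Logger):
        return instance.logger
    if isinstance(getattr(instance, 'log', None), logging.Logger):
        return instance.log
    return logging.getLogger(__name__)

class Owner:
    log = logging.getLogger('owner.log')

print(resolve_logger(None, Owner()).name)  # 'owner.log'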
jgorset/django-respite
respite/formats.py
https://github.com/jgorset/django-respite/blob/719469d11baf91d05917bab1623bd82adc543546/respite/formats.py#L69-L79
def find_by_extension(extension): """ Find and return a format by extension. :param extension: A string describing the extension of the format. """ for format in FORMATS: if extension in format.extensions: return format raise UnknownFormat('No format found with extension "%s"' % extension)
[ "def", "find_by_extension", "(", "extension", ")", ":", "for", "format", "in", "FORMATS", ":", "if", "extension", "in", "format", ".", "extensions", ":", "return", "format", "raise", "UnknownFormat", "(", "'No format found with extension \"%s\"'", "%", "extension", ")" ]
Find and return a format by extension. :param extension: A string describing the extension of the format.
[ "Find", "and", "return", "a", "format", "by", "extension", "." ]
python
train
29.636364
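Hedged usage; it assumes FORMATS and UnknownFormat live alongside this function in respite.formats, as the code suggests:

from respite.formats import find_by_extension, UnknownFormat

try:
    fmt = find_by_extension('json')
except UnknownFormat:
    fmt = None  # e.g. an unregistered extension such as 'xyz'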
xolox/python-vcs-repo-mgr
vcs_repo_mgr/__init__.py
https://github.com/xolox/python-vcs-repo-mgr/blob/fdad2441a3e7ba5deeeddfa1c2f5ebc00c393aed/vcs_repo_mgr/__init__.py#L935-L968
def tags(self): """ A dictionary that maps tag names to :class:`Revision` objects. Here's an example based on a mirror of the git project's repository: >>> from pprint import pprint >>> from vcs_repo_mgr.backends.git import GitRepo >>> repository = GitRepo(remote='https://github.com/git/git.git') >>> pprint(repository.tags) {'v0.99': Revision(repository=GitRepo(...), tag='v0.99', revision_id='d6602ec5194c87b0fc87103ca4d67251c76f233a'), 'v0.99.1': Revision(repository=GitRepo(...), tag='v0.99.1', revision_id='f25a265a342aed6041ab0cc484224d9ca54b6f41'), 'v0.99.2': Revision(repository=GitRepo(...), tag='v0.99.2', revision_id='c5db5456ae3b0873fc659c19fafdde22313cc441'), ..., # dozens of tags omitted to keep this example short 'v2.3.6': Revision(repository=GitRepo(...), tag='v2.3.6', revision_id='8e7304597727126cdc52771a9091d7075a70cc31'), 'v2.3.7': Revision(repository=GitRepo(...), tag='v2.3.7', revision_id='b17db4d9c966de30f5445632411c932150e2ad2f'), 'v2.4.0': Revision(repository=GitRepo(...), tag='v2.4.0', revision_id='67308bd628c6235dbc1bad60c9ad1f2d27d576cc')} """ # Make sure the local repository exists. self.create() # Create a mapping of tag names to revisions. return dict((r.tag, r) for r in self.find_tags())
[ "def", "tags", "(", "self", ")", ":", "# Make sure the local repository exists.", "self", ".", "create", "(", ")", "# Create a mapping of tag names to revisions.", "return", "dict", "(", "(", "r", ".", "tag", ",", "r", ")", "for", "r", "in", "self", ".", "find_tags", "(", ")", ")" ]
A dictionary that maps tag names to :class:`Revision` objects. Here's an example based on a mirror of the git project's repository: >>> from pprint import pprint >>> from vcs_repo_mgr.backends.git import GitRepo >>> repository = GitRepo(remote='https://github.com/git/git.git') >>> pprint(repository.tags) {'v0.99': Revision(repository=GitRepo(...), tag='v0.99', revision_id='d6602ec5194c87b0fc87103ca4d67251c76f233a'), 'v0.99.1': Revision(repository=GitRepo(...), tag='v0.99.1', revision_id='f25a265a342aed6041ab0cc484224d9ca54b6f41'), 'v0.99.2': Revision(repository=GitRepo(...), tag='v0.99.2', revision_id='c5db5456ae3b0873fc659c19fafdde22313cc441'), ..., # dozens of tags omitted to keep this example short 'v2.3.6': Revision(repository=GitRepo(...), tag='v2.3.6', revision_id='8e7304597727126cdc52771a9091d7075a70cc31'), 'v2.3.7': Revision(repository=GitRepo(...), tag='v2.3.7', revision_id='b17db4d9c966de30f5445632411c932150e2ad2f'), 'v2.4.0': Revision(repository=GitRepo(...), tag='v2.4.0', revision_id='67308bd628c6235dbc1bad60c9ad1f2d27d576cc')}
[ "A", "dictionary", "that", "maps", "tag", "names", "to", ":", "class", ":", "Revision", "objects", "." ]
python
train
49.852941
hyperledger/sawtooth-core
cli/sawtooth_cli/network_command/compare.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/cli/sawtooth_cli/network_command/compare.py#L442-L459
def print_block_num_row(block_num, cliques, next_cliques): """Print out a row of padding and a row with the block number. Includes the branches prior to this block number.""" n_cliques = len(cliques) if n_cliques == 0: print('| {}'.format(block_num)) return def mapper(clique): block_id, _ = clique if block_id not in next_cliques: return ' ' return '|' format_str = '{:<' + str(n_cliques * 2) + '} {}' branches = list(map(mapper, cliques)) for end in ('', block_num): print(format_str.format(' '.join(branches), end))
[ "def", "print_block_num_row", "(", "block_num", ",", "cliques", ",", "next_cliques", ")", ":", "n_cliques", "=", "len", "(", "cliques", ")", "if", "n_cliques", "==", "0", ":", "print", "(", "'| {}'", ".", "format", "(", "block_num", ")", ")", "return", "def", "mapper", "(", "clique", ")", ":", "block_id", ",", "_", "=", "clique", "if", "block_id", "not", "in", "next_cliques", ":", "return", "' '", "return", "'|'", "format_str", "=", "'{:<'", "+", "str", "(", "n_cliques", "*", "2", ")", "+", "'} {}'", "branches", "=", "list", "(", "map", "(", "mapper", ",", "cliques", ")", ")", "for", "end", "in", "(", "''", ",", "block_num", ")", ":", "print", "(", "format_str", ".", "format", "(", "' '", ".", "join", "(", "branches", ")", ",", "end", ")", ")" ]
Print out a row of padding and a row with the block number. Includes the branches prior to this block number.
[ "Print", "out", "a", "row", "of", "padding", "and", "a", "row", "with", "the", "block", "number", ".", "Includes", "the", "branches", "prior", "to", "this", "block", "number", "." ]
python
train
33.111111
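A small demonstration with two hypothetical cliques, only the first of which continues into the next block, so only its branch keeps a '|' marker:

cliques = [('abc', None), ('def', None)]
next_cliques = {'abc'}
print_block_num_row(42, cliques, next_cliques)
# prints (approximately) a padding row followed by the block-number row:
# |
# |    42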
treycucco/pyebnf
pyebnf/compiler.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/compiler.py#L396-L406
def _ast_op_concat_to_code(self, opr, *, ignore_whitespace, **kwargs): """Convert an AST concatenate op to python source code.""" hoist_target = OP_CONCAT if ignore_whitespace else OP_WS_CONCAT operands = self._hoist_operands(opr.operands, lambda t: isinstance(t, OptreeNode) and t.opnode.operator is hoist_target) lines = ["concatenation(["] for op in operands: lines.extend(self._indent(self._ast_to_code(op, ignore_whitespace=ignore_whitespace))) lines[-1] += "," lines.append("], ignore_whitespace={})".format(bool(ignore_whitespace))) return lines
[ "def", "_ast_op_concat_to_code", "(", "self", ",", "opr", ",", "*", ",", "ignore_whitespace", ",", "*", "*", "kwargs", ")", ":", "hoist_target", "=", "OP_CONCAT", "if", "ignore_whitespace", "else", "OP_WS_CONCAT", "operands", "=", "self", ".", "_hoist_operands", "(", "opr", ".", "operands", ",", "lambda", "t", ":", "isinstance", "(", "t", ",", "OptreeNode", ")", "and", "t", ".", "opnode", ".", "operator", "is", "hoist_target", ")", "lines", "=", "[", "\"concatenation([\"", "]", "for", "op", "in", "operands", ":", "lines", ".", "extend", "(", "self", ".", "_indent", "(", "self", ".", "_ast_to_code", "(", "op", ",", "ignore_whitespace", "=", "ignore_whitespace", ")", ")", ")", "lines", "[", "-", "1", "]", "+=", "\",\"", "lines", ".", "append", "(", "\"], ignore_whitespace={})\"", ".", "format", "(", "bool", "(", "ignore_whitespace", ")", ")", ")", "return", "lines" ]
Convert an AST concatenate op to python source code.
[ "Convert", "an", "AST", "concatenate", "op", "to", "python", "source", "code", "." ]
python
test
53
pytroll/satpy
satpy/readers/sar_c_safe.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/sar_c_safe.py#L257-L279
def interpolate_xarray_linear(xpoints, ypoints, values, shape, chunks=CHUNK_SIZE): """Interpolate linearly, generating a dask array.""" from scipy.interpolate.interpnd import (LinearNDInterpolator, _ndim_coords_from_arrays) if isinstance(chunks, (list, tuple)): vchunks, hchunks = chunks else: vchunks, hchunks = chunks, chunks points = _ndim_coords_from_arrays(np.vstack((np.asarray(ypoints), np.asarray(xpoints))).T) interpolator = LinearNDInterpolator(points, values) grid_x, grid_y = da.meshgrid(da.arange(shape[1], chunks=hchunks), da.arange(shape[0], chunks=vchunks)) # workaround for non-thread-safe first call of the interpolator: interpolator((0, 0)) res = da.map_blocks(intp, grid_x, grid_y, interpolator=interpolator) return DataArray(res, dims=('y', 'x'))
[ "def", "interpolate_xarray_linear", "(", "xpoints", ",", "ypoints", ",", "values", ",", "shape", ",", "chunks", "=", "CHUNK_SIZE", ")", ":", "from", "scipy", ".", "interpolate", ".", "interpnd", "import", "(", "LinearNDInterpolator", ",", "_ndim_coords_from_arrays", ")", "if", "isinstance", "(", "chunks", ",", "(", "list", ",", "tuple", ")", ")", ":", "vchunks", ",", "hchunks", "=", "chunks", "else", ":", "vchunks", ",", "hchunks", "=", "chunks", ",", "chunks", "points", "=", "_ndim_coords_from_arrays", "(", "np", ".", "vstack", "(", "(", "np", ".", "asarray", "(", "ypoints", ")", ",", "np", ".", "asarray", "(", "xpoints", ")", ")", ")", ".", "T", ")", "interpolator", "=", "LinearNDInterpolator", "(", "points", ",", "values", ")", "grid_x", ",", "grid_y", "=", "da", ".", "meshgrid", "(", "da", ".", "arange", "(", "shape", "[", "1", "]", ",", "chunks", "=", "hchunks", ")", ",", "da", ".", "arange", "(", "shape", "[", "0", "]", ",", "chunks", "=", "vchunks", ")", ")", "# workaround for non-thread-safe first call of the interpolator:", "interpolator", "(", "(", "0", ",", "0", ")", ")", "res", "=", "da", ".", "map_blocks", "(", "intp", ",", "grid_x", ",", "grid_y", ",", "interpolator", "=", "interpolator", ")", "return", "DataArray", "(", "res", ",", "dims", "=", "(", "'y'", ",", "'x'", ")", ")" ]
Interpolate linearly, generating a dask array.
[ "Interpolate", "linearly", "generating", "a", "dask", "array", "." ]
python
train
40.73913
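A hedged sketch, assuming the function is imported from satpy.readers.sar_c_safe (it relies on dask, xarray, scipy and module-level helpers such as intp):

import numpy as np

xpoints = np.array([0, 9, 0, 9])          # tie-point columns
ypoints = np.array([0, 0, 9, 9])          # tie-point rows
values = np.array([0.0, 1.0, 2.0, 3.0])   # values at the four corners
arr = interpolate_xarray_linear(xpoints, ypoints, values, shape=(10, 10))
print(arr.dims, arr.shape)                # ('y', 'x') (10, 10)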
Pythonity/icon-font-to-png
icon_font_to_png/command_line.py
https://github.com/Pythonity/icon-font-to-png/blob/4851fe15c077402749f843d43fbc10d28f6c655d/icon_font_to_png/command_line.py#L10-L164
def run(arguments): """Main function for command line usage""" parser = argparse.ArgumentParser( description="Exports font icons as PNG images." ) parser.add_argument( '--list', action='store_true', help="list all available icon names and exit" ) parser.add_argument( '--download', choices=[x for x in AVAILABLE_ICON_FONTS.keys()], help="download latest icon font and exit" ) required_group = parser.add_argument_group("required arguments") required_group.add_argument( '--ttf', metavar='TTF-FILE', type=open, help='path to TTF file' ) required_group.add_argument( '--css', metavar='CSS-FILE', type=open, help="path to CSS file" ) exp_group = parser.add_argument_group("exporting icons") exp_group.add_argument( 'icons', type=str, nargs='*', help="names of the icons to export (or 'ALL' for all icons)" ) exp_group.add_argument( '--size', type=int, default=16, help="icon size in pixels (default: 16)" ) exp_group.add_argument( '--scale', type=str, default='auto', help="scaling factor between 0 and 1, or 'auto' for automatic scaling " "(default: auto); be careful, as setting it may lead to icons " "being cropped" ) exp_group.add_argument( '--color', type=str, default='black', help="color name or hex value (default: black)" ) exp_group.add_argument( '--filename', type=str, help="name of the output file (without '.png' extension); " "it's used as a prefix if multiple icons are exported" ) exp_group.add_argument( '--keep_prefix', default=False, action='store_true', help="do not remove common icon prefix " "(i.e. 'fa-arrow-right' instead of 'arrow-right')" ) args = parser.parse_args(arguments) # Parse '--download' argument first if args.download: downloader = download_icon_font(args.download, os.getcwd()) downloader.download_files() print("Icon font '{name}' successfully downloaded".format( name=args.download) ) parser.exit() # If not '--download', then css and ttf files are required if not args.css or not args.ttf: parser.error("You have to provide CSS and TTF files") icon_font = IconFont(css_file=args.css.name, ttf_file=args.ttf.name, keep_prefix=args.keep_prefix) args.css.close() args.ttf.close() # Then '--list' if args.list: for icon in icon_font.css_icons.keys(): print(icon) parser.exit() # If not '--list' or '--download', parse passed icons selected_icons = list() if not args.icons: parser.error("You have to pass at least one icon name") elif args.icons == ['ALL']: selected_icons = icon_font.css_icons.keys() else: for icon in args.icons: if (args.keep_prefix and not icon.startswith(icon_font.common_prefix)): # Prepend icon name with prefix icon = icon_font.common_prefix + icon elif (not args.keep_prefix and icon.startswith(icon_font.common_prefix)): # Remove prefix from icon name icon = icon[len(icon_font.common_prefix):] # Check if given icon names exist if icon in icon_font.css_icons: selected_icons.append(icon) else: parser.error("Unknown icon name '{icon}'".format(icon=icon)) # Parse filename and remove the extension if necessary given_filename = args.filename or '' if given_filename.lower().endswith('.png'): given_filename = given_filename[:-4] # Some fonts have empty values # (prefix only - which we remove - for common styles) selected_icons = list(filter(None, selected_icons)) # Commence exporting for icon in selected_icons: if len(selected_icons) > 1: # Multiple icons - treat the filename option as name prefix filename = '{prefix}{icon}.png'.format( prefix=given_filename, icon=icon, ) else: if given_filename: # Use the specified filename filename = given_filename + '.png' else: # Use icon name as filename filename = str(icon) + '.png' print("Exporting icon '{icon}' as '{filename}'" "({size}x{size} pixels)".format(icon=icon, filename=filename, size=args.size)) icon_font.export_icon(icon=icon, filename=filename, size=args.size, color=args.color, scale=args.scale) print() print("All done")
[ "def", "run", "(", "arguments", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Exports font icons as PNG images.\"", ")", "parser", ".", "add_argument", "(", "'--list'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"list all available icon names and exit\"", ")", "parser", ".", "add_argument", "(", "'--download'", ",", "choices", "=", "[", "x", "for", "x", "in", "AVAILABLE_ICON_FONTS", ".", "keys", "(", ")", "]", ",", "help", "=", "\"download latest icon font and exit\"", ")", "required_group", "=", "parser", ".", "add_argument_group", "(", "\"required arguments\"", ")", "required_group", ".", "add_argument", "(", "'--ttf'", ",", "metavar", "=", "'TTF-FILE'", ",", "type", "=", "open", ",", "help", "=", "'path to TTF file'", ")", "required_group", ".", "add_argument", "(", "'--css'", ",", "metavar", "=", "'CSS-FILE'", ",", "type", "=", "open", ",", "help", "=", "\"path to CSS file\"", ")", "exp_group", "=", "parser", ".", "add_argument_group", "(", "\"exporting icons\"", ")", "exp_group", ".", "add_argument", "(", "'icons'", ",", "type", "=", "str", ",", "nargs", "=", "'*'", ",", "help", "=", "\"names of the icons to export (or 'ALL' for all icons)\"", ")", "exp_group", ".", "add_argument", "(", "'--size'", ",", "type", "=", "int", ",", "default", "=", "16", ",", "help", "=", "\"icon size in pixels (default: 16)\"", ")", "exp_group", ".", "add_argument", "(", "'--scale'", ",", "type", "=", "str", ",", "default", "=", "'auto'", ",", "help", "=", "\"scaling factor between 0 and 1, or 'auto' for automatic scaling \"", "\"(default: auto); be careful, as setting it may lead to icons \"", "\"being cropped\"", ")", "exp_group", ".", "add_argument", "(", "'--color'", ",", "type", "=", "str", ",", "default", "=", "'black'", ",", "help", "=", "\"color name or hex value (default: black)\"", ")", "exp_group", ".", "add_argument", "(", "'--filename'", ",", "type", "=", "str", ",", "help", "=", "\"name of the output file (without '.png' extension); \"", "\"it's used as a prefix if multiple icons are exported\"", ")", "exp_group", ".", "add_argument", "(", "'--keep_prefix'", ",", "default", "=", "False", ",", "action", "=", "'store_true'", ",", "help", "=", "\"do not remove common icon prefix \"", "\"(i.e. 
'fa-arrow-right' instead of 'arrow-right')\"", ")", "args", "=", "parser", ".", "parse_args", "(", "arguments", ")", "# Parse '--download' argument first", "if", "args", ".", "download", ":", "downloader", "=", "download_icon_font", "(", "args", ".", "download", ",", "os", ".", "getcwd", "(", ")", ")", "downloader", ".", "download_files", "(", ")", "print", "(", "\"Icon font '{name}' successfully downloaded\"", ".", "format", "(", "name", "=", "args", ".", "download", ")", ")", "parser", ".", "exit", "(", ")", "# If not '--download', then css and tff files are required", "if", "not", "args", ".", "css", "or", "not", "args", ".", "ttf", ":", "parser", ".", "error", "(", "\"You have to provide CSS and TTF files\"", ")", "icon_font", "=", "IconFont", "(", "css_file", "=", "args", ".", "css", ".", "name", ",", "ttf_file", "=", "args", ".", "ttf", ".", "name", ",", "keep_prefix", "=", "args", ".", "keep_prefix", ")", "args", ".", "css", ".", "close", "(", ")", "args", ".", "ttf", ".", "close", "(", ")", "# Then '--list'", "if", "args", ".", "list", ":", "for", "icon", "in", "icon_font", ".", "css_icons", ".", "keys", "(", ")", ":", "print", "(", "icon", ")", "parser", ".", "exit", "(", ")", "# If not '--list' or '--download', parse passed icons", "selected_icons", "=", "list", "(", ")", "if", "not", "args", ".", "icons", ":", "parser", ".", "error", "(", "\"You have to pass at least one icon name\"", ")", "elif", "args", ".", "icons", "==", "[", "'ALL'", "]", ":", "selected_icons", "=", "icon_font", ".", "css_icons", ".", "keys", "(", ")", "else", ":", "for", "icon", "in", "args", ".", "icons", ":", "if", "(", "args", ".", "keep_prefix", "and", "not", "icon", ".", "startswith", "(", "icon_font", ".", "common_prefix", ")", ")", ":", "# Prepend icon name with prefix", "icon", "=", "icon_font", ".", "common_prefix", "+", "icon", "elif", "(", "not", "args", ".", "keep_prefix", "and", "icon", ".", "startswith", "(", "icon_font", ".", "common_prefix", ")", ")", ":", "# Remove prefix from icon name", "icon", "=", "icon", "[", "len", "(", "icon_font", ".", "common_prefix", ")", ":", "]", "# Check if given icon names exist", "if", "icon", "in", "icon_font", ".", "css_icons", ":", "selected_icons", ".", "append", "(", "icon", ")", "else", ":", "parser", ".", "error", "(", "\"Unknown icon name '{icon}'\"", ".", "format", "(", "icon", "=", "icon", ")", ")", "# Parse filename and remove the extension if necessary", "given_filename", "=", "args", ".", "filename", "or", "''", "if", "given_filename", ".", "lower", "(", ")", ".", "endswith", "(", "'.png'", ")", ":", "given_filename", "=", "given_filename", "[", ":", "-", "4", "]", "# Some fonts have empty values", "# (prefix only - which we remove - for common styles)", "selected_icons", "=", "list", "(", "filter", "(", "None", ",", "selected_icons", ")", ")", "# Commence exporting", "for", "icon", "in", "selected_icons", ":", "if", "len", "(", "selected_icons", ")", ">", "1", ":", "# Multiple icons - treat the filename option as name prefix", "filename", "=", "'{prefix}{icon}.png'", ".", "format", "(", "prefix", "=", "given_filename", ",", "icon", "=", "icon", ",", ")", "else", ":", "if", "given_filename", ":", "# Use the specified filename", "filename", "=", "given_filename", "+", "'.png'", "else", ":", "# Use icon name as filename", "filename", "=", "str", "(", "icon", ")", "+", "'.png'", "print", "(", "\"Exporting icon '{icon}' as '{filename}'\"", "\"({size}x{size} pixels)\"", ".", "format", "(", "icon", "=", "icon", ",", "filename", "=", "filename", ",", 
"size", "=", "args", ".", "size", ")", ")", "icon_font", ".", "export_icon", "(", "icon", "=", "icon", ",", "filename", "=", "filename", ",", "size", "=", "args", ".", "size", ",", "color", "=", "args", ".", "color", ",", "scale", "=", "args", ".", "scale", ")", "print", "(", ")", "print", "(", "\"All done\"", ")" ]
Main function for command line usage
[ "Main", "function", "for", "command", "line", "usage" ]
python
train
31.883871
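A hedged example of driving the entry point directly; the CSS/TTF paths are hypothetical and must point at a matching pair:

run(['--ttf', 'fontawesome-webfont.ttf', '--css', 'font-awesome.css',
     'arrow-right', '--size', '32', '--color', 'red'])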
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/tailf_confd_monitoring.py#L341-L353
def confd_state_rest_listen_tcp_port(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring") rest = ET.SubElement(confd_state, "rest") listen = ET.SubElement(rest, "listen") tcp = ET.SubElement(listen, "tcp") port = ET.SubElement(tcp, "port") port.text = kwargs.pop('port') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "confd_state_rest_listen_tcp_port", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "confd_state", "=", "ET", ".", "SubElement", "(", "config", ",", "\"confd-state\"", ",", "xmlns", "=", "\"http://tail-f.com/yang/confd-monitoring\"", ")", "rest", "=", "ET", ".", "SubElement", "(", "confd_state", ",", "\"rest\"", ")", "listen", "=", "ET", ".", "SubElement", "(", "rest", ",", "\"listen\"", ")", "tcp", "=", "ET", ".", "SubElement", "(", "listen", ",", "\"tcp\"", ")", "port", "=", "ET", ".", "SubElement", "(", "tcp", ",", "\"port\"", ")", "port", ".", "text", "=", "kwargs", ".", "pop", "(", "'port'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
41.692308
hydpy-dev/hydpy
hydpy/auxs/armatools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/armatools.py#L257-L263
def moments(self): """The first two time delay weighted statistical moments of the MA coefficients.""" moment1 = statstools.calc_mean_time(self.delays, self.coefs) moment2 = statstools.calc_mean_time_deviation( self.delays, self.coefs, moment1) return numpy.array([moment1, moment2])
[ "def", "moments", "(", "self", ")", ":", "moment1", "=", "statstools", ".", "calc_mean_time", "(", "self", ".", "delays", ",", "self", ".", "coefs", ")", "moment2", "=", "statstools", ".", "calc_mean_time_deviation", "(", "self", ".", "delays", ",", "self", ".", "coefs", ",", "moment1", ")", "return", "numpy", ".", "array", "(", "[", "moment1", ",", "moment2", "]", ")" ]
The first two time delay weighted statistical moments of the MA coefficients.
[ "The", "first", "two", "time", "delay", "weighted", "statistical", "moments", "of", "the", "MA", "coefficients", "." ]
python
train
47
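A conceptual check with a toy MA model, assuming the usual weighted-mean definitions behind statstools.calc_mean_time and calc_mean_time_deviation:

import numpy

delays = numpy.array([0.0, 1.0, 2.0])
coefs = numpy.array([0.25, 0.5, 0.25])
moment1 = (delays * coefs).sum() / coefs.sum()                               # 1.0
moment2 = numpy.sqrt(((delays - moment1) ** 2 * coefs).sum() / coefs.sum())  # ~0.7071
print(moment1, moment2)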
thiagopbueno/pyrddl
pyrddl/rddl.py
https://github.com/thiagopbueno/pyrddl/blob/3bcfa850b1a7532c7744358f3c6b9e0f8ab978c9/pyrddl/rddl.py#L214-L223
def interm_range_type(self) -> Sequence[str]: '''The range type of each intermediate fluent in canonical order. Returns: Sequence[str]: A tuple of range types representing the range of each fluent. ''' fluents = self.domain.intermediate_fluents ordering = self.domain.interm_fluent_ordering return self._fluent_range_type(fluents, ordering)
[ "def", "interm_range_type", "(", "self", ")", "->", "Sequence", "[", "str", "]", ":", "fluents", "=", "self", ".", "domain", ".", "intermediate_fluents", "ordering", "=", "self", ".", "domain", ".", "interm_fluent_ordering", "return", "self", ".", "_fluent_range_type", "(", "fluents", ",", "ordering", ")" ]
The range type of each intermediate fluent in canonical order. Returns: Sequence[str]: A tuple of range types representing the range of each fluent.
[ "The", "range", "type", "of", "each", "intermediate", "fluent", "in", "canonical", "order", "." ]
python
train
40.4
Bystroushaak/pyDHTMLParser
src/dhtmlparser/htmlelement/html_parser.py
https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/htmlelement/html_parser.py#L138-L157
def _init_tag_params(self, tag, params):
        """
        Alternative constructor used when the tag parameters are added to the
        HTMLElement (HTMLElement(tag, params)).

        This method just creates a string and then passes it to
        :meth:`_init_tag`.

        Args:
            tag (str): HTML tag as string.
            params (dict): HTML tag parameters as dictionary.
        """
        self._element = tag
        self.params = params
        self._parseTagName()
        self._istag = True
        self._isendtag = False
        self._isnonpairtag = False
        self._element = self.tagToString()
[ "def", "_init_tag_params", "(", "self", ",", "tag", ",", "params", ")", ":", "self", ".", "_element", "=", "tag", "self", ".", "params", "=", "params", "self", ".", "_parseTagName", "(", ")", "self", ".", "_istag", "=", "True", "self", ".", "_isendtag", "=", "False", "self", ".", "_isnonpairtag", "=", "False", "self", ".", "_element", "=", "self", ".", "tagToString", "(", ")" ]
Alternative constructor used when the tag parameters are added to the HTMLElement (HTMLElement(tag, params)). This method just creates a string and then passes it to :meth:`_init_tag`. Args: tag (str): HTML tag as string. params (dict): HTML tag parameters as dictionary.
[ "Alternative", "constructor", "used", "when", "the", "tag", "parameters", "are", "added", "to", "the", "HTMLElement", "(", "HTMLElement", "(", "tag", "params", "))", "." ]
python
train
30.3
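Hedged usage; per the docstring, HTMLElement(tag, params) routes through this constructor path:

import dhtmlparser

e = dhtmlparser.HTMLElement('<div>', {'class': 'content'})
print(e.tagToString())  # expected output along the lines of '<div class="content">'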
chrismattmann/tika-python
tika/tika.py
https://github.com/chrismattmann/tika-python/blob/ffd3879ac3eaa9142c0fb6557cc1dc52d458a75a/tika/tika.py#L741-L769
def checkPortIsOpen(remoteServerHost=ServerHost, port = Port):
    '''
    Checks if the specified port is open
    :param remoteServerHost: the host address
    :param port: port which needs to be checked
    :return: ``True`` if port is open, ``False`` otherwise
    '''
    remoteServerIP = socket.gethostbyname(remoteServerHost)
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = sock.connect_ex((remoteServerIP, int(port)))
        sock.close()  # close the socket before returning so it is always released
        if result == 0:
            return True
        else:
            return False
    except KeyboardInterrupt:
        print("You pressed Ctrl+C")
        sys.exit()
    except socket.gaierror:
        print('Hostname could not be resolved. Exiting')
        sys.exit()
    except socket.error:
        print("Couldn't connect to server")
        sys.exit()
[ "def", "checkPortIsOpen", "(", "remoteServerHost", "=", "ServerHost", ",", "port", "=", "Port", ")", ":", "remoteServerIP", "=", "socket", ".", "gethostbyname", "(", "remoteServerHost", ")", "try", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "result", "=", "sock", ".", "connect_ex", "(", "(", "remoteServerIP", ",", "int", "(", "port", ")", ")", ")", "if", "result", "==", "0", ":", "return", "True", "else", ":", "return", "False", "sock", ".", "close", "(", ")", "#FIXME: the above line is unreachable", "except", "KeyboardInterrupt", ":", "print", "(", "\"You pressed Ctrl+C\"", ")", "sys", ".", "exit", "(", ")", "except", "socket", ".", "gaierror", ":", "print", "(", "'Hostname could not be resolved. Exiting'", ")", "sys", ".", "exit", "(", ")", "except", "socket", ".", "error", ":", "print", "(", "\"Couldn't connect to server\"", ")", "sys", ".", "exit", "(", ")" ]
Checks if the specified port is open :param remoteServerHost: the host address :param port: port which needs to be checked :return: ``True`` if port is open, ``False`` otherwise
[ "Checks", "if", "the", "specified", "port", "is", "open", ":", "param", "remoteServerHost", ":", "the", "host", "address", ":", "param", "port", ":", "port", "which", "needs", "to", "be", "checked", ":", "return", ":", "True", "if", "port", "is", "open", "False", "otherwise" ]
python
train
30.206897
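Hedged usage; 9998 is the customary Tika server port, used here purely as an example value:

if checkPortIsOpen('localhost', 9998):
    print('Tika server port is open')
else:
    print('Tika server port is closed')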
bast/flanders
cmake/autocmake/configure.py
https://github.com/bast/flanders/blob/792f9eed8511cb553e67a25b6c5ce60fd6ae97bc/cmake/autocmake/configure.py#L14-L33
def check_cmake_exists(cmake_command):
    """
    Check whether CMake is installed. If not, print an informative error
    message and quit.
    """
    from subprocess import Popen, PIPE

    p = Popen(
        '{0} --version'.format(cmake_command),
        shell=True,
        stdin=PIPE,
        stdout=PIPE)
    if not ('cmake version' in p.communicate()[0].decode('UTF-8')):
        sys.stderr.write(' This code is built using CMake\n\n')
        sys.stderr.write(' CMake is not found\n')
        sys.stderr.write(' get CMake at http://www.cmake.org/\n')
        sys.stderr.write(' on many clusters CMake is installed\n')
        sys.stderr.write(' but you have to load it first:\n')
        sys.stderr.write(' $ module load cmake\n')
        sys.exit(1)
[ "def", "check_cmake_exists", "(", "cmake_command", ")", ":", "from", "subprocess", "import", "Popen", ",", "PIPE", "p", "=", "Popen", "(", "'{0} --version'", ".", "format", "(", "cmake_command", ")", ",", "shell", "=", "True", ",", "stdin", "=", "PIPE", ",", "stdout", "=", "PIPE", ")", "if", "not", "(", "'cmake version'", "in", "p", ".", "communicate", "(", ")", "[", "0", "]", ".", "decode", "(", "'UTF-8'", ")", ")", ":", "sys", ".", "stderr", ".", "write", "(", "' This code is built using CMake\\n\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "' CMake is not found\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "' get CMake at http://www.cmake.org/\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "' on many clusters CMake is installed\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "' but you have to load it first:\\n'", ")", "sys", ".", "stderr", ".", "write", "(", "' $ module load cmake\\n'", ")", "sys", ".", "exit", "(", "1", ")" ]
Check whether CMake is installed. If not, print an informative error message and quit.
[ "Check", "whether", "CMake", "is", "installed", ".", "If", "not", "print", "informative", "error", "message", "and", "quits", "." ]
python
train
37.55
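Typical hedged usage: verify the default cmake binary before generating build files; the call exits with an explanatory message if CMake is missing:

check_cmake_exists('cmake')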
ThreatConnect-Inc/tcex
tcex/tcex_ti_indicator.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti_indicator.py#L274-L291
def occurrence(self, file_name=None, path=None, date=None): """Add a file Occurrence. Args: file_name (str, optional): The file name for this occurrence. path (str, optional): The file path for this occurrence. date (str, optional): The datetime expression for this occurrence. Returns: obj: An instance of Occurrence. """ if self._indicator_data.get('type') != 'File': # Indicator object has no logger to output warning return None occurrence_obj = FileOccurrence(file_name, path, date) self._occurrences.append(occurrence_obj) return occurrence_obj
[ "def", "occurrence", "(", "self", ",", "file_name", "=", "None", ",", "path", "=", "None", ",", "date", "=", "None", ")", ":", "if", "self", ".", "_indicator_data", ".", "get", "(", "'type'", ")", "!=", "'File'", ":", "# Indicator object has no logger to output warning", "return", "None", "occurrence_obj", "=", "FileOccurrence", "(", "file_name", ",", "path", ",", "date", ")", "self", ".", "_occurrences", ".", "append", "(", "occurrence_obj", ")", "return", "occurrence_obj" ]
Add a file Occurrence. Args: file_name (str, optional): The file name for this occurrence. path (str, optional): The file path for this occurrence. date (str, optional): The datetime expression for this occurrence. Returns: obj: An instance of Occurrence.
[ "Add", "a", "file", "Occurrence", "." ]
python
train
37.333333
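A hedged sketch; `indicator` stands for an already-built File indicator object from tcex (hypothetical here), and the field values are illustrative:

occ = indicator.occurrence(file_name='dropper.exe',
                           path='C:\\temp\\dropper.exe',
                           date='2019-01-01T00:00:00Z')
if occ is None:
    print('occurrences only apply to File indicators')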
Nic30/hwtGraph
hwtGraph/elk/fromHwt/utils.py
https://github.com/Nic30/hwtGraph/blob/6b7d4fdd759f263a0fdd2736f02f123e44e4354f/hwtGraph/elk/fromHwt/utils.py#L187-L211
def _addPort(n: LNode, lp: LPort, intf: Interface, reverseDirection=False): """ add port to LPort for interface """ origin = originObjOfPort(intf) d = intf._direction d = PortTypeFromDir(d) if reverseDirection: d = PortType.opposite(d) new_lp = LPort(lp, d, lp.side, name=intf._name) new_lp.originObj = origin if intf._interfaces: for child_intf in intf._interfaces: _addPort(n, new_lp, child_intf, reverseDirection=reverseDirection) lp.children.append(new_lp) new_lp.parent = lp if n._node2lnode is not None: n._node2lnode[origin] = new_lp return new_lp
[ "def", "_addPort", "(", "n", ":", "LNode", ",", "lp", ":", "LPort", ",", "intf", ":", "Interface", ",", "reverseDirection", "=", "False", ")", ":", "origin", "=", "originObjOfPort", "(", "intf", ")", "d", "=", "intf", ".", "_direction", "d", "=", "PortTypeFromDir", "(", "d", ")", "if", "reverseDirection", ":", "d", "=", "PortType", ".", "opposite", "(", "d", ")", "new_lp", "=", "LPort", "(", "lp", ",", "d", ",", "lp", ".", "side", ",", "name", "=", "intf", ".", "_name", ")", "new_lp", ".", "originObj", "=", "origin", "if", "intf", ".", "_interfaces", ":", "for", "child_intf", "in", "intf", ".", "_interfaces", ":", "_addPort", "(", "n", ",", "new_lp", ",", "child_intf", ",", "reverseDirection", "=", "reverseDirection", ")", "lp", ".", "children", ".", "append", "(", "new_lp", ")", "new_lp", ".", "parent", "=", "lp", "if", "n", ".", "_node2lnode", "is", "not", "None", ":", "n", ".", "_node2lnode", "[", "origin", "]", "=", "new_lp", "return", "new_lp" ]
add port to LPort for interface
[ "add", "port", "to", "LPort", "for", "interface" ]
python
train
26.4
iotile/coretools
transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/native_ble/iotile_transport_native_ble/device_adapter.py#L984-L994
def periodic_callback(self): """Periodic cleanup tasks to maintain this adapter, should be called every second. """ if self.stopped: return # Check if we should start scanning again if not self.scanning and len(self.connections.get_connections()) == 0: self._logger.info("Restarting scan for devices") self.start_scan(self._active_scan) self._logger.info("Finished restarting scan for devices")
[ "def", "periodic_callback", "(", "self", ")", ":", "if", "self", ".", "stopped", ":", "return", "# Check if we should start scanning again", "if", "not", "self", ".", "scanning", "and", "len", "(", "self", ".", "connections", ".", "get_connections", "(", ")", ")", "==", "0", ":", "self", ".", "_logger", ".", "info", "(", "\"Restarting scan for devices\"", ")", "self", ".", "start_scan", "(", "self", ".", "_active_scan", ")", "self", ".", "_logger", ".", "info", "(", "\"Finished restarting scan for devices\"", ")" ]
Periodic cleanup tasks to maintain this adapter, should be called every second.
[ "Periodic", "cleanup", "tasks", "to", "maintain", "this", "adapter", "should", "be", "called", "every", "second", "." ]
python
train
42.363636
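A hedged sketch of honoring the once-per-second contract; `adapter` stands for an already-constructed device adapter from this module (hypothetical here):

import threading

def tick(adapter):
    adapter.periodic_callback()
    threading.Timer(1.0, tick, args=(adapter,)).start()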
blockstack/blockstack-core
blockstack/lib/atlas.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/atlas.py#L2177-L2266
def atlas_find_missing_zonefile_availability( peer_table=None, con=None, path=None, missing_zonefile_info=None ):
    """
    Find the set of missing zonefiles, as well as their popularity amongst our neighbors.

    Only consider zonefiles that are known by at least one peer; otherwise they're missing
    from our clique (and we'll re-sync our neighbors' inventories every so often to make
    sure we detect when zonefiles become available).

    Return a dict, structured as:
    {
        'zonefile hash': {
            'names': [names],
            'txid': first txid that set it,
            'indexes': [...],
            'popularity': ...,
            'peers': [...],
            'tried_storage': True|False
        }
    }
    """
    # which zonefiles do we have?
    bit_offset = 0
    bit_count = 10000
    missing = []
    ret = {}

    if missing_zonefile_info is None:
        while True:
            zfinfo = atlasdb_zonefile_find_missing( bit_offset, bit_count, con=con, path=path )
            if len(zfinfo) == 0:
                break

            missing += zfinfo
            bit_offset += len(zfinfo)

        if len(missing) > 0:
            log.debug("Missing %s zonefiles" % len(missing))

    else:
        missing = missing_zonefile_info

    if len(missing) == 0:
        # none!
        return ret

    with AtlasPeerTableLocked(peer_table) as ptbl:

        # do any other peers have this zonefile?
        for zfinfo in missing:
            popularity = 0
            byte_index = (zfinfo['inv_index'] - 1) / 8
            bit_index = 7 - ((zfinfo['inv_index'] - 1) % 8)
            peers = []

            if not ret.has_key(zfinfo['zonefile_hash']):
                ret[zfinfo['zonefile_hash']] = {
                    'names': [],
                    'txid': zfinfo['txid'],
                    'indexes': [],
                    'block_heights': [],
                    'popularity': 0,
                    'peers': [],
                    'tried_storage': False
                }

            for peer_hostport in ptbl.keys():
                peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
                if len(peer_inv) <= byte_index:
                    # too new for this peer
                    continue

                if (ord(peer_inv[byte_index]) & (1 << bit_index)) == 0:
                    # this peer doesn't have it
                    continue

                if peer_hostport not in ret[zfinfo['zonefile_hash']]['peers']:
                    popularity += 1
                    peers.append( peer_hostport )

            ret[zfinfo['zonefile_hash']]['names'].append( zfinfo['name'] )
            ret[zfinfo['zonefile_hash']]['indexes'].append( zfinfo['inv_index']-1 )
            ret[zfinfo['zonefile_hash']]['block_heights'].append( zfinfo['block_height'] )
            ret[zfinfo['zonefile_hash']]['popularity'] += popularity
            ret[zfinfo['zonefile_hash']]['peers'] += peers
            ret[zfinfo['zonefile_hash']]['tried_storage'] = zfinfo['tried_storage']

    return ret
[ "def", "atlas_find_missing_zonefile_availability", "(", "peer_table", "=", "None", ",", "con", "=", "None", ",", "path", "=", "None", ",", "missing_zonefile_info", "=", "None", ")", ":", "# which zonefiles do we have?", "bit_offset", "=", "0", "bit_count", "=", "10000", "missing", "=", "[", "]", "ret", "=", "{", "}", "if", "missing_zonefile_info", "is", "None", ":", "while", "True", ":", "zfinfo", "=", "atlasdb_zonefile_find_missing", "(", "bit_offset", ",", "bit_count", ",", "con", "=", "con", ",", "path", "=", "path", ")", "if", "len", "(", "zfinfo", ")", "==", "0", ":", "break", "missing", "+=", "zfinfo", "bit_offset", "+=", "len", "(", "zfinfo", ")", "if", "len", "(", "missing", ")", ">", "0", ":", "log", ".", "debug", "(", "\"Missing %s zonefiles\"", "%", "len", "(", "missing", ")", ")", "else", ":", "missing", "=", "missing_zonefile_info", "if", "len", "(", "missing", ")", "==", "0", ":", "# none!", "return", "ret", "with", "AtlasPeerTableLocked", "(", "peer_table", ")", "as", "ptbl", ":", "# do any other peers have this zonefile?", "for", "zfinfo", "in", "missing", ":", "popularity", "=", "0", "byte_index", "=", "(", "zfinfo", "[", "'inv_index'", "]", "-", "1", ")", "/", "8", "bit_index", "=", "7", "-", "(", "(", "zfinfo", "[", "'inv_index'", "]", "-", "1", ")", "%", "8", ")", "peers", "=", "[", "]", "if", "not", "ret", ".", "has_key", "(", "zfinfo", "[", "'zonefile_hash'", "]", ")", ":", "ret", "[", "zfinfo", "[", "'zonefile_hash'", "]", "]", "=", "{", "'names'", ":", "[", "]", ",", "'txid'", ":", "zfinfo", "[", "'txid'", "]", ",", "'indexes'", ":", "[", "]", ",", "'block_heights'", ":", "[", "]", ",", "'popularity'", ":", "0", ",", "'peers'", ":", "[", "]", ",", "'tried_storage'", ":", "False", "}", "for", "peer_hostport", "in", "ptbl", ".", "keys", "(", ")", ":", "peer_inv", "=", "atlas_peer_get_zonefile_inventory", "(", "peer_hostport", ",", "peer_table", "=", "ptbl", ")", "if", "len", "(", "peer_inv", ")", "<=", "byte_index", ":", "# too new for this peer", "continue", "if", "(", "ord", "(", "peer_inv", "[", "byte_index", "]", ")", "&", "(", "1", "<<", "bit_index", ")", ")", "==", "0", ":", "# this peer doesn't have it", "continue", "if", "peer_hostport", "not", "in", "ret", "[", "zfinfo", "[", "'zonefile_hash'", "]", "]", "[", "'peers'", "]", ":", "popularity", "+=", "1", "peers", ".", "append", "(", "peer_hostport", ")", "ret", "[", "zfinfo", "[", "'zonefile_hash'", "]", "]", "[", "'names'", "]", ".", "append", "(", "zfinfo", "[", "'name'", "]", ")", "ret", "[", "zfinfo", "[", "'zonefile_hash'", "]", "]", "[", "'indexes'", "]", ".", "append", "(", "zfinfo", "[", "'inv_index'", "]", "-", "1", ")", "ret", "[", "zfinfo", "[", "'zonefile_hash'", "]", "]", "[", "'block_heights'", "]", ".", "append", "(", "zfinfo", "[", "'block_height'", "]", ")", "ret", "[", "zfinfo", "[", "'zonefile_hash'", "]", "]", "[", "'popularity'", "]", "+=", "popularity", "ret", "[", "zfinfo", "[", "'zonefile_hash'", "]", "]", "[", "'peers'", "]", "+=", "peers", "ret", "[", "zfinfo", "[", "'zonefile_hash'", "]", "]", "[", "'tried_storage'", "]", "=", "zfinfo", "[", "'tried_storage'", "]", "return", "ret" ]
Find the set of missing zonefiles, as well as their popularity amongst our neighbors. Only consider zonefiles that are known by at least one peer; otherwise they're missing from our clique (and we'll re-sync our neighbors' inventories every so often to make sure we detect when zonefiles become available). Return a dict, structured as: { 'zonefile hash': { 'names': [names], 'txid': first txid that set it, 'indexes': [...], 'popularity': ..., 'peers': [...], 'tried_storage': True|False } }
[ "Find", "the", "set", "of", "missing", "zonefiles", "as", "well", "as", "their", "popularity", "amongst", "our", "neighbors", "." ]
python
train
33.333333
drastus/unicover
unicover/unicover.py
https://github.com/drastus/unicover/blob/4702d0151c63d525c25718a838396afe62302255/unicover/unicover.py#L238-L247
def _getCharFont(self, font_files, code_point): """ Returns font files containing given code point. """ return_font_files = [] for font_file in font_files: face = ft.Face(font_file) if face.get_char_index(code_point): return_font_files.append(font_file) return return_font_files
[ "def", "_getCharFont", "(", "self", ",", "font_files", ",", "code_point", ")", ":", "return_font_files", "=", "[", "]", "for", "font_file", "in", "font_files", ":", "face", "=", "ft", ".", "Face", "(", "font_file", ")", "if", "face", ".", "get_char_index", "(", "code_point", ")", ":", "return_font_files", ".", "append", "(", "font_file", ")", "return", "return_font_files" ]
Returns font files containing given code point.
[ "Returns", "font", "files", "containing", "given", "code", "point", "." ]
python
train
35.7
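The per-face check is easy to reproduce standalone with the freetype bindings the module imports as `ft`; the font path below is a hypothetical example:

import freetype as ft

face = ft.Face('/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf')
print(bool(face.get_char_index(ord('Ω'))))  # True if the face covers U+03A9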
CalebBell/thermo
thermo/heat_capacity.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/heat_capacity.py#L1517-L1557
def Zabransky_quasi_polynomial_integral(T, Tc, a1, a2, a3, a4, a5, a6): r'''Calculates the integral of liquid heat capacity using the quasi-polynomial model developed in [1]_. Parameters ---------- T : float Temperature [K] a1-a6 : float Coefficients Returns ------- H : float Difference in enthalpy from 0 K, [J/mol] Notes ----- The analytical integral was derived with SymPy; it is a simple polynomial plus some logarithms. Examples -------- >>> H2 = Zabransky_quasi_polynomial_integral(300, 591.79, -3.12743, ... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989) >>> H1 = Zabransky_quasi_polynomial_integral(200, 591.79, -3.12743, ... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989) >>> H2 - H1 14662.026406892925 References ---------- .. [1] Zabransky, M., V. Ruzicka Jr, V. Majer, and Eugene S. Domalski. Heat Capacity of Liquids: Critical Review and Recommended Values. 2 Volume Set. Washington, D.C.: Amer Inst of Physics, 1996. ''' Tc2 = Tc*Tc Tc3 = Tc2*Tc term = T - Tc return R*(T*(T*(T*(T*a6/(4.*Tc3) + a5/(3.*Tc2)) + a4/(2.*Tc)) - a1 + a3) + T*a1*log(1. - T/Tc) - 0.5*Tc*(a1 + a2)*log(term*term))
[ "def", "Zabransky_quasi_polynomial_integral", "(", "T", ",", "Tc", ",", "a1", ",", "a2", ",", "a3", ",", "a4", ",", "a5", ",", "a6", ")", ":", "Tc2", "=", "Tc", "*", "Tc", "Tc3", "=", "Tc2", "*", "Tc", "term", "=", "T", "-", "Tc", "return", "R", "*", "(", "T", "*", "(", "T", "*", "(", "T", "*", "(", "T", "*", "a6", "/", "(", "4.", "*", "Tc3", ")", "+", "a5", "/", "(", "3.", "*", "Tc2", ")", ")", "+", "a4", "/", "(", "2.", "*", "Tc", ")", ")", "-", "a1", "+", "a3", ")", "+", "T", "*", "a1", "*", "log", "(", "1.", "-", "T", "/", "Tc", ")", "-", "0.5", "*", "Tc", "*", "(", "a1", "+", "a2", ")", "*", "log", "(", "term", "*", "term", ")", ")" ]
r'''Calculates the integral of liquid heat capacity using the quasi-polynomial model developed in [1]_. Parameters ---------- T : float Temperature [K] a1-a6 : float Coefficients Returns ------- H : float Difference in enthalpy from 0 K, [J/mol] Notes ----- The analytical integral was derived with SymPy; it is a simple polynomial plus some logarithms. Examples -------- >>> H2 = Zabransky_quasi_polynomial_integral(300, 591.79, -3.12743, ... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989) >>> H1 = Zabransky_quasi_polynomial_integral(200, 591.79, -3.12743, ... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989) >>> H2 - H1 14662.026406892925 References ---------- .. [1] Zabransky, M., V. Ruzicka Jr, V. Majer, and Eugene S. Domalski. Heat Capacity of Liquids: Critical Review and Recommended Values. 2 Volume Set. Washington, D.C.: Amer Inst of Physics, 1996.
[ "r", "Calculates", "the", "integral", "of", "liquid", "heat", "capacity", "using", "the", "quasi", "-", "polynomial", "model", "developed", "in", "[", "1", "]", "_", "." ]
python
valid
30.292683
clalancette/pycdlib
pycdlib/pycdlib.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L2375-L2530
def _open_fp(self, fp):
        # type: (BinaryIO) -> None
        '''
        An internal method to open an existing ISO for inspection and
        modification.  Note that the file object passed in here must stay open
        for the lifetime of this object, as the PyCdlib class uses it
        internally to do writing and reading operations.

        Parameters:
         fp - The file object containing the ISO to open up.
        Returns:
         Nothing.
        '''
        if hasattr(fp, 'mode') and 'b' not in fp.mode:
            raise pycdlibexception.PyCdlibInvalidInput("The file to open must be in binary mode (add 'b' to the open flags)")

        self._cdfp = fp

        # Get the Primary Volume Descriptor (pvd), the set of Supplementary
        # Volume Descriptors (svds), the set of Volume Partition
        # Descriptors (vpds), the set of Boot Records (brs), and the set of
        # Volume Descriptor Set Terminators (vdsts)
        self._parse_volume_descriptors()

        old = self._cdfp.tell()
        self._cdfp.seek(0)
        tmp_mbr = isohybrid.IsoHybrid()
        if tmp_mbr.parse(self._cdfp.read(512)):
            # We only save the object if it turns out to be a valid IsoHybrid
            self.isohybrid_mbr = tmp_mbr
        self._cdfp.seek(old)

        if self.pvd.application_use[141:149] == b'CD-XA001':
            self.xa = True

        for br in self.brs:
            self._check_and_parse_eltorito(br)

        # Now that we have the PVD, parse the Path Tables according to Ecma-119
        # section 9.4.  We want to ensure that the big endian versions agree
        # with the little endian ones (to make sure it is a valid ISO).

        # Little Endian first
        le_ptrs, extent_to_ptr = self._parse_path_table(self.pvd.path_table_size(), self.pvd.path_table_location_le)

        # Big Endian next.
        tmp_be_ptrs, e_unused = self._parse_path_table(self.pvd.path_table_size(), self.pvd.path_table_location_be)

        for index, ptr in enumerate(le_ptrs):
            if not ptr.equal_to_be(tmp_be_ptrs[index]):
                raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian path table records do not agree')

        self.interchange_level = 1
        for svd in self.svds:
            if svd.version == 2 and svd.file_structure_version == 2:
                self.interchange_level = 4
                break

        extent_to_inode = {}  # type: Dict[int, inode.Inode]

        # OK, so now that we have the PVD, we start at its root directory
        # record and find all of the files
        ic_level, lastbyte = self._walk_directories(self.pvd, extent_to_ptr, extent_to_inode, le_ptrs)

        self.interchange_level = max(self.interchange_level, ic_level)

        # On El Torito ISOs, after we have walked the directories we look
        # to see if all of the entries in El Torito have corresponding
        # directory records.  If they don't, then it may be the case that
        # the El Torito bits of the system are 'hidden' or 'unlinked',
        # meaning that they take up space but have no corresponding directory
        # record in the ISO filesystem.  In order to accommodate the rest
        # of the system, which really expects these things to have directory
        # records, we use fake directory records that don't get written out.
        #
        # Note that we specifically do *not* add these to any sort of parent;
        # that way, we don't run afoul of any checks that adding a child to a
        # parent might have.  This means that if we do ever want to unhide this
        # entry, we'll have to do some additional work to give it a real name
        # and link it to the appropriate parent.
        if self.eltorito_boot_catalog is not None:
            self._link_eltorito(extent_to_inode)

            # Now that everything has a dirrecord, see if we have a boot
            # info table.
            self._check_for_eltorito_boot_info_table(self.eltorito_boot_catalog.initial_entry.inode)
            for sec in self.eltorito_boot_catalog.sections:
                for entry in sec.section_entries:
                    self._check_for_eltorito_boot_info_table(entry.inode)

        # The PVD is finished.  Now look to see if we need to parse the SVD.
        for svd in self.svds:
            if (svd.flags & 0x1) == 0 and svd.escape_sequences[:3] in (b'%/@', b'%/C', b'%/E'):
                if self.joliet_vd is not None:
                    raise pycdlibexception.PyCdlibInvalidISO('Only a single Joliet SVD is supported')

                self.joliet_vd = svd

                le_ptrs, joliet_extent_to_ptr = self._parse_path_table(svd.path_table_size(), svd.path_table_location_le)

                tmp_be_ptrs, j_unused = self._parse_path_table(svd.path_table_size(), svd.path_table_location_be)

                for index, ptr in enumerate(le_ptrs):
                    if not ptr.equal_to_be(tmp_be_ptrs[index]):
                        raise pycdlibexception.PyCdlibInvalidISO('Joliet little-endian and big-endian path table records do not agree')

                self._walk_directories(svd, joliet_extent_to_ptr, extent_to_inode, le_ptrs)
            elif svd.version == 2 and svd.file_structure_version == 2:
                if self.enhanced_vd is not None:
                    raise pycdlibexception.PyCdlibInvalidISO('Only a single enhanced VD is supported')
                self.enhanced_vd = svd

        # We've seen ISOs in the wild (Office XP) that have a PVD space size
        # that is smaller than the location of the last directory record
        # extent + length.  If we see this, automatically update the size in the
        # PVD (and any SVDs) so that subsequent operations will be correct.
        log_block_size = self.pvd.logical_block_size()
        if lastbyte > self.pvd.space_size * log_block_size:
            new_pvd_size = utils.ceiling_div(lastbyte, log_block_size)
            for pvd in self.pvds:
                pvd.space_size = new_pvd_size
            if self.joliet_vd is not None:
                self.joliet_vd.space_size = new_pvd_size
            if self.enhanced_vd is not None:
                self.enhanced_vd.space_size = new_pvd_size

        # Look to see if this is a UDF volume.  It is one if we have a UDF BEA,
        # UDF NSR, and UDF TEA, in which case we parse the UDF descriptors and
        # walk the filesystem.
        if self._has_udf:
            self._parse_udf_descriptors()
            self._walk_udf_directories(extent_to_inode)

        # Now we look for the 'version' volume descriptor, common on ISOs made
        # with genisoimage or mkisofs.  This volume descriptor doesn't have any
        # specification, but from code inspection, it is either a completely
        # zero extent, or starts with 'MKI'.  Further, it starts directly after
        # the VDST, or directly after the UDF recognition sequence (if this is
        # a UDF ISO).  Thus, we go looking for it at those places, and add it
        # if we find it there.
        version_vd_extent = self.vdsts[0].extent_location() + 1
        if self._has_udf:
            version_vd_extent = self.udf_tea.extent_location() + 1

        version_vd = headervd.VersionVolumeDescriptor()
        self._cdfp.seek(version_vd_extent * log_block_size)
        if version_vd.parse(self._cdfp.read(log_block_size), version_vd_extent):
            self.version_vd = version_vd

        self._initialized = True
[ "def", "_open_fp", "(", "self", ",", "fp", ")", ":", "# type: (BinaryIO) -> None", "if", "hasattr", "(", "fp", ",", "'mode'", ")", "and", "'b'", "not", "in", "fp", ".", "mode", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "\"The file to open must be in binary mode (add 'b' to the open flags)\"", ")", "self", ".", "_cdfp", "=", "fp", "# Get the Primary Volume Descriptor (pvd), the set of Supplementary", "# Volume Descriptors (svds), the set of Volume Partition", "# Descriptors (vpds), the set of Boot Records (brs), and the set of", "# Volume Descriptor Set Terminators (vdsts)", "self", ".", "_parse_volume_descriptors", "(", ")", "old", "=", "self", ".", "_cdfp", ".", "tell", "(", ")", "self", ".", "_cdfp", ".", "seek", "(", "0", ")", "tmp_mbr", "=", "isohybrid", ".", "IsoHybrid", "(", ")", "if", "tmp_mbr", ".", "parse", "(", "self", ".", "_cdfp", ".", "read", "(", "512", ")", ")", ":", "# We only save the object if it turns out to be a valid IsoHybrid", "self", ".", "isohybrid_mbr", "=", "tmp_mbr", "self", ".", "_cdfp", ".", "seek", "(", "old", ")", "if", "self", ".", "pvd", ".", "application_use", "[", "141", ":", "149", "]", "==", "b'CD-XA001'", ":", "self", ".", "xa", "=", "True", "for", "br", "in", "self", ".", "brs", ":", "self", ".", "_check_and_parse_eltorito", "(", "br", ")", "# Now that we have the PVD, parse the Path Tables according to Ecma-119", "# section 9.4. We want to ensure that the big endian versions agree", "# with the little endian ones (to make sure it is a valid ISO).", "# Little Endian first", "le_ptrs", ",", "extent_to_ptr", "=", "self", ".", "_parse_path_table", "(", "self", ".", "pvd", ".", "path_table_size", "(", ")", ",", "self", ".", "pvd", ".", "path_table_location_le", ")", "# Big Endian next.", "tmp_be_ptrs", ",", "e_unused", "=", "self", ".", "_parse_path_table", "(", "self", ".", "pvd", ".", "path_table_size", "(", ")", ",", "self", ".", "pvd", ".", "path_table_location_be", ")", "for", "index", ",", "ptr", "in", "enumerate", "(", "le_ptrs", ")", ":", "if", "not", "ptr", ".", "equal_to_be", "(", "tmp_be_ptrs", "[", "index", "]", ")", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'Little-endian and big-endian path table records do not agree'", ")", "self", ".", "interchange_level", "=", "1", "for", "svd", "in", "self", ".", "svds", ":", "if", "svd", ".", "version", "==", "2", "and", "svd", ".", "file_structure_version", "==", "2", ":", "self", ".", "interchange_level", "=", "4", "break", "extent_to_inode", "=", "{", "}", "# type: Dict[int, inode.Inode]", "# OK, so now that we have the PVD, we start at its root directory", "# record and find all of the files", "ic_level", ",", "lastbyte", "=", "self", ".", "_walk_directories", "(", "self", ".", "pvd", ",", "extent_to_ptr", ",", "extent_to_inode", ",", "le_ptrs", ")", "self", ".", "interchange_level", "=", "max", "(", "self", ".", "interchange_level", ",", "ic_level", ")", "# On El Torito ISOs, after we have walked the directories we look", "# to see if all of the entries in El Torito have corresponding", "# directory records. If they don't, then it may be the case that", "# the El Torito bits of the system are 'hidden' or 'unlinked',", "# meaning that they take up space but have no corresponding directory", "# record in the ISO filesystem. 
In order to accommodate the rest", "# of the system, which really expects these things to have directory", "# records, we use fake directory records that don't get written out.", "#", "# Note that we specifically do *not* add these to any sort of parent;", "# that way, we don't run afoul of any checks that adding a child to a", "# parent might have. This means that if we do ever want to unhide this", "# entry, we'll have to do some additional work to give it a real name", "# and link it to the appropriate parent.", "if", "self", ".", "eltorito_boot_catalog", "is", "not", "None", ":", "self", ".", "_link_eltorito", "(", "extent_to_inode", ")", "# Now that everything has a dirrecord, see if we have a boot", "# info table.", "self", ".", "_check_for_eltorito_boot_info_table", "(", "self", ".", "eltorito_boot_catalog", ".", "initial_entry", ".", "inode", ")", "for", "sec", "in", "self", ".", "eltorito_boot_catalog", ".", "sections", ":", "for", "entry", "in", "sec", ".", "section_entries", ":", "self", ".", "_check_for_eltorito_boot_info_table", "(", "entry", ".", "inode", ")", "# The PVD is finished. Now look to see if we need to parse the SVD.", "for", "svd", "in", "self", ".", "svds", ":", "if", "(", "svd", ".", "flags", "&", "0x1", ")", "==", "0", "and", "svd", ".", "escape_sequences", "[", ":", "3", "]", "in", "(", "b'%/@'", ",", "b'%/C'", ",", "b'%/E'", ")", ":", "if", "self", ".", "joliet_vd", "is", "not", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'Only a single Joliet SVD is supported'", ")", "self", ".", "joliet_vd", "=", "svd", "le_ptrs", ",", "joliet_extent_to_ptr", "=", "self", ".", "_parse_path_table", "(", "svd", ".", "path_table_size", "(", ")", ",", "svd", ".", "path_table_location_le", ")", "tmp_be_ptrs", ",", "j_unused", "=", "self", ".", "_parse_path_table", "(", "svd", ".", "path_table_size", "(", ")", ",", "svd", ".", "path_table_location_be", ")", "for", "index", ",", "ptr", "in", "enumerate", "(", "le_ptrs", ")", ":", "if", "not", "ptr", ".", "equal_to_be", "(", "tmp_be_ptrs", "[", "index", "]", ")", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'Joliet little-endian and big-endian path table records do not agree'", ")", "self", ".", "_walk_directories", "(", "svd", ",", "joliet_extent_to_ptr", ",", "extent_to_inode", ",", "le_ptrs", ")", "elif", "svd", ".", "version", "==", "2", "and", "svd", ".", "file_structure_version", "==", "2", ":", "if", "self", ".", "enhanced_vd", "is", "not", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidISO", "(", "'Only a single enhanced VD is supported'", ")", "self", ".", "enhanced_vd", "=", "svd", "# We've seen ISOs in the wild (Office XP) that have a PVD space size", "# that is smaller than the location of the last directory record", "# extent + length. 
If we see this, automatically update the size in the", "# PVD (and any SVDs) so that subsequent operations will be correct.", "log_block_size", "=", "self", ".", "pvd", ".", "logical_block_size", "(", ")", "if", "lastbyte", ">", "self", ".", "pvd", ".", "space_size", "*", "log_block_size", ":", "new_pvd_size", "=", "utils", ".", "ceiling_div", "(", "lastbyte", ",", "log_block_size", ")", "for", "pvd", "in", "self", ".", "pvds", ":", "pvd", ".", "space_size", "=", "new_pvd_size", "if", "self", ".", "joliet_vd", "is", "not", "None", ":", "self", ".", "joliet_vd", ".", "space_size", "=", "new_pvd_size", "if", "self", ".", "enhanced_vd", "is", "not", "None", ":", "self", ".", "enhanced_vd", ".", "space_size", "=", "new_pvd_size", "# Look to see if this is a UDF volume. It is one if we have a UDF BEA,", "# UDF NSR, and UDF TEA, in which case we parse the UDF descriptors and", "# walk the filesystem.", "if", "self", ".", "_has_udf", ":", "self", ".", "_parse_udf_descriptors", "(", ")", "self", ".", "_walk_udf_directories", "(", "extent_to_inode", ")", "# Now we look for the 'version' volume descriptor, common on ISOs made", "# with genisoimage or mkisofs. This volume descriptor doesn't have any", "# specification, but from code inspection, it is either a completely", "# zero extent, or starts with 'MKI'. Further, it starts directly after", "# the VDST, or directly after the UDF recognition sequence (if this is", "# a UDF ISO). Thus, we go looking for it at those places, and add it", "# if we find it there.", "version_vd_extent", "=", "self", ".", "vdsts", "[", "0", "]", ".", "extent_location", "(", ")", "+", "1", "if", "self", ".", "_has_udf", ":", "version_vd_extent", "=", "self", ".", "udf_tea", ".", "extent_location", "(", ")", "+", "1", "version_vd", "=", "headervd", ".", "VersionVolumeDescriptor", "(", ")", "self", ".", "_cdfp", ".", "seek", "(", "version_vd_extent", "*", "log_block_size", ")", "if", "version_vd", ".", "parse", "(", "self", ".", "_cdfp", ".", "read", "(", "log_block_size", ")", ",", "version_vd_extent", ")", ":", "self", ".", "version_vd", "=", "version_vd", "self", ".", "_initialized", "=", "True" ]
An internal method to open an existing ISO for inspection and modification. Note that the file object passed in here must stay open for the lifetime of this object, as the PyCdlib class uses it internally to do writing and reading operations. Parameters: fp - The file object containing the ISO to open up. Returns: Nothing.
[ "An", "internal", "method", "to", "open", "an", "existing", "ISO", "for", "inspection", "and", "modification", ".", "Note", "that", "the", "file", "object", "passed", "in", "here", "must", "stay", "open", "for", "the", "lifetime", "of", "this", "object", "as", "the", "PyCdlib", "class", "uses", "it", "internally", "to", "do", "writing", "and", "reading", "operations", "." ]
python
train
49.083333
google/transitfeed
transitfeed/serviceperiod.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/serviceperiod.py#L148-L160
def SetDayOfWeekHasService(self, dow, has_service=True): """Set service as running (or not) on a day of the week. By default the service does not run on any days. Args: dow: 0 for Monday through 6 for Sunday has_service: True if this service operates on dow, False if it does not. Returns: None """ assert(dow >= 0 and dow < 7) self.day_of_week[dow] = has_service
[ "def", "SetDayOfWeekHasService", "(", "self", ",", "dow", ",", "has_service", "=", "True", ")", ":", "assert", "(", "dow", ">=", "0", "and", "dow", "<", "7", ")", "self", ".", "day_of_week", "[", "dow", "]", "=", "has_service" ]
Set service as running (or not) on a day of the week. By default the service does not run on any days. Args: dow: 0 for Monday through 6 for Sunday has_service: True if this service operates on dow, False if it does not. Returns: None
[ "Set", "service", "as", "running", "(", "or", "not", ")", "on", "a", "day", "of", "the", "week", ".", "By", "default", "the", "service", "does", "not", "run", "on", "any", "days", "." ]
python
train
30.692308
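A minimal usage sketch for the record above: `period` stands in for an existing transitfeed ServicePeriod instance, whose construction is not shown in the source.

# Hedged sketch: enable weekday-only service on an assumed ServicePeriod.
for dow in range(5):                       # 0 = Monday ... 4 = Friday
    period.SetDayOfWeekHasService(dow, True)
period.SetDayOfWeekHasService(5, False)    # Saturday
period.SetDayOfWeekHasService(6, False)    # Sunday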
ska-sa/katcp-python
katcp/inspecting_client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/inspecting_client.py#L852-L881
def future_check_request(self, name, update=None): """Check if the request exists. Used internally by future_get_request. This method is aware of synchronisation in progress and if inspection of the server is allowed. Parameters ---------- name : str Name of the request to verify. update : bool or None, optional If a katcp request to the server should be made to check if the sensor is on the server. True = Allow, False do not Allow, None use the class default. Notes ----- Ensure that self.state.data_synced == True if yielding to future_check_request from a state-change callback, or a deadlock will occur. """ exist = False yield self.until_data_synced() if name in self._requests_index: exist = True else: if update or (update is None and self._update_on_lookup): yield self.inspect_requests(name) exist = yield self.future_check_request(name, False) raise tornado.gen.Return(exist)
[ "def", "future_check_request", "(", "self", ",", "name", ",", "update", "=", "None", ")", ":", "exist", "=", "False", "yield", "self", ".", "until_data_synced", "(", ")", "if", "name", "in", "self", ".", "_requests_index", ":", "exist", "=", "True", "else", ":", "if", "update", "or", "(", "update", "is", "None", "and", "self", ".", "_update_on_lookup", ")", ":", "yield", "self", ".", "inspect_requests", "(", "name", ")", "exist", "=", "yield", "self", ".", "future_check_request", "(", "name", ",", "False", ")", "raise", "tornado", ".", "gen", ".", "Return", "(", "exist", ")" ]
Check if the request exists. Used internally by future_get_request. This method is aware of synchronisation in progress and if inspection of the server is allowed. Parameters ---------- name : str Name of the request to verify. update : bool or None, optional If a katcp request to the server should be made to check if the sensor is on the server. True = Allow, False do not Allow, None use the class default. Notes ----- Ensure that self.state.data_synced == True if yielding to future_check_request from a state-change callback, or a deadlock will occur.
[ "Check", "if", "the", "request", "exists", "." ]
python
train
36.8
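A hedged caller sketch for the coroutine above; the client instance and request name are assumptions, only future_check_request and its parameters come from the record.

import tornado.gen

@tornado.gen.coroutine
def ensure_request(client):
    # `client` is an assumed inspecting-client instance and 'sensor-list'
    # a placeholder request name.
    exists = yield client.future_check_request('sensor-list', update=True)
    if not exists:
        raise RuntimeError('request not available on the server')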
juanifioren/django-oidc-provider
oidc_provider/lib/endpoints/authorize.py
https://github.com/juanifioren/django-oidc-provider/blob/f0daed07b2ac7608565b80d4c80ccf04d8c416a8/oidc_provider/lib/endpoints/authorize.py#L257-L271
def client_has_user_consent(self):
    """
    Check if user consent already exists for some client.

    Return bool.
    """
    value = False

    try:
        uc = UserConsent.objects.get(user=self.request.user, client=self.client)
        if (set(self.params['scope']).issubset(uc.scope)) and not (uc.has_expired()):
            value = True
    except UserConsent.DoesNotExist:
        pass

    return value
[ "def", "client_has_user_consent", "(", "self", ")", ":", "value", "=", "False", "try", ":", "uc", "=", "UserConsent", ".", "objects", ".", "get", "(", "user", "=", "self", ".", "request", ".", "user", ",", "client", "=", "self", ".", "client", ")", "if", "(", "set", "(", "self", ".", "params", "[", "'scope'", "]", ")", ".", "issubset", "(", "uc", ".", "scope", ")", ")", "and", "not", "(", "uc", ".", "has_expired", "(", ")", ")", ":", "value", "=", "True", "except", "UserConsent", ".", "DoesNotExist", ":", "pass", "return", "value" ]
Check if user consent already exists for some client.

Return bool.
[ "Check", "if", "already", "exists", "user", "consent", "for", "some", "client", "." ]
python
train
29.8
bspaans/python-mingus
mingus/midi/midi_track.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_track.py#L190-L192
def note_on(self, channel, note, velocity): """Return bytes for a 'note_on' event.""" return self.midi_event(NOTE_ON, channel, note, velocity)
[ "def", "note_on", "(", "self", ",", "channel", ",", "note", ",", "velocity", ")", ":", "return", "self", ".", "midi_event", "(", "NOTE_ON", ",", "channel", ",", "note", ",", "velocity", ")" ]
Return bytes for a 'note_on' event.
[ "Return", "bytes", "for", "a", "note_on", "event", "." ]
python
train
52
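A one-line usage sketch, assuming `track` is an instance of the class defining note_on above.

# Middle C (note 60) at velocity 100 on channel 0; returns raw MIDI bytes.
event_bytes = track.note_on(0, 60, 100)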
google/tangent
tangent/reverse_ad.py
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/reverse_ad.py#L945-L963
def joint(node): """Merge the bodies of primal and adjoint into a single function. Args: node: A module with the primal and adjoint function definitions as returned by `reverse_ad`. Returns: func: A `Module` node with a single function definition containing the combined primal and adjoint. """ node, _, _ = _fix(node) body = node.body[0].body[:-1] + node.body[1].body func = gast.Module(body=[gast.FunctionDef( name=node.body[0].name, args=node.body[1].args, body=body, decorator_list=[], returns=None)]) # Clean up anno.clearanno(func) return func
[ "def", "joint", "(", "node", ")", ":", "node", ",", "_", ",", "_", "=", "_fix", "(", "node", ")", "body", "=", "node", ".", "body", "[", "0", "]", ".", "body", "[", ":", "-", "1", "]", "+", "node", ".", "body", "[", "1", "]", ".", "body", "func", "=", "gast", ".", "Module", "(", "body", "=", "[", "gast", ".", "FunctionDef", "(", "name", "=", "node", ".", "body", "[", "0", "]", ".", "name", ",", "args", "=", "node", ".", "body", "[", "1", "]", ".", "args", ",", "body", "=", "body", ",", "decorator_list", "=", "[", "]", ",", "returns", "=", "None", ")", "]", ")", "# Clean up", "anno", ".", "clearanno", "(", "func", ")", "return", "func" ]
Merge the bodies of primal and adjoint into a single function. Args: node: A module with the primal and adjoint function definitions as returned by `reverse_ad`. Returns: func: A `Module` node with a single function definition containing the combined primal and adjoint.
[ "Merge", "the", "bodies", "of", "primal", "and", "adjoint", "into", "a", "single", "function", "." ]
python
train
31
astropy/photutils
photutils/datasets/load.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/datasets/load.py#L112-L152
def load_spitzer_catalog(show_progress=False): # pragma: no cover """ Load a 4.5 micron Spitzer catalog. The image from which this catalog was derived is returned by :func:`load_spitzer_image`. Parameters ---------- show_progress : bool, optional Whether to display a progress bar during the download (default is `False`). Returns ------- catalog : `~astropy.table.Table` The catalog of sources. See Also -------- load_spitzer_image Examples -------- .. plot:: :include-source: from photutils import datasets catalog = datasets.load_spitzer_catalog() plt.scatter(catalog['l'], catalog['b']) plt.xlabel('Galactic l') plt.ylabel('Galactic b') plt.xlim(18.39, 18.05) plt.ylim(0.13, 0.30) """ path = get_path('spitzer_example_catalog.xml', location='remote', show_progress=show_progress) table = Table.read(path) return table
[ "def", "load_spitzer_catalog", "(", "show_progress", "=", "False", ")", ":", "# pragma: no cover", "path", "=", "get_path", "(", "'spitzer_example_catalog.xml'", ",", "location", "=", "'remote'", ",", "show_progress", "=", "show_progress", ")", "table", "=", "Table", ".", "read", "(", "path", ")", "return", "table" ]
Load a 4.5 micron Spitzer catalog. The image from which this catalog was derived is returned by :func:`load_spitzer_image`. Parameters ---------- show_progress : bool, optional Whether to display a progress bar during the download (default is `False`). Returns ------- catalog : `~astropy.table.Table` The catalog of sources. See Also -------- load_spitzer_image Examples -------- .. plot:: :include-source: from photutils import datasets catalog = datasets.load_spitzer_catalog() plt.scatter(catalog['l'], catalog['b']) plt.xlabel('Galactic l') plt.ylabel('Galactic b') plt.xlim(18.39, 18.05) plt.ylim(0.13, 0.30)
[ "Load", "a", "4", ".", "5", "micron", "Spitzer", "catalog", "." ]
python
train
23.902439
Neurita/boyle
boyle/utils/validation.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/utils/validation.py#L90-L97
def _num_samples(x): """Return number of samples in array-like x.""" if not hasattr(x, '__len__') and not hasattr(x, 'shape'): if hasattr(x, '__array__'): x = np.asarray(x) else: raise TypeError("Expected sequence or array-like, got %r" % x) return x.shape[0] if hasattr(x, 'shape') else len(x)
[ "def", "_num_samples", "(", "x", ")", ":", "if", "not", "hasattr", "(", "x", ",", "'__len__'", ")", "and", "not", "hasattr", "(", "x", ",", "'shape'", ")", ":", "if", "hasattr", "(", "x", ",", "'__array__'", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "else", ":", "raise", "TypeError", "(", "\"Expected sequence or array-like, got %r\"", "%", "x", ")", "return", "x", ".", "shape", "[", "0", "]", "if", "hasattr", "(", "x", ",", "'shape'", ")", "else", "len", "(", "x", ")" ]
Return number of samples in array-like x.
[ "Return", "number", "of", "samples", "in", "array", "-", "like", "x", "." ]
python
valid
42.375
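A hedged sketch of the helper's two dispatch paths; _num_samples is a private function in boyle.utils.validation and is assumed to be in scope here.

import numpy as np

assert _num_samples([1, 2, 3]) == 3           # sequence: falls back to len()
assert _num_samples(np.zeros((5, 2))) == 5    # ndarray: uses shape[0]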
mozilla/mozdownload
mozdownload/parser.py
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/parser.py#L81-L89
def handle_data(self, data): """Callback when the data of a tag has been collected.""" # Only process the data when we are in an active a tag and have an URL. if not self.active_url: return # The visible text can have a final slash so strip it off if data.strip('/') == self.active_url: self.entries.append(self.active_url)
[ "def", "handle_data", "(", "self", ",", "data", ")", ":", "# Only process the data when we are in an active a tag and have an URL.", "if", "not", "self", ".", "active_url", ":", "return", "# The visible text can have a final slash so strip it off", "if", "data", ".", "strip", "(", "'/'", ")", "==", "self", ".", "active_url", ":", "self", ".", "entries", ".", "append", "(", "self", ".", "active_url", ")" ]
Callback when the data of a tag has been collected.
[ "Callback", "when", "the", "data", "of", "a", "tag", "has", "been", "collected", "." ]
python
train
42.222222
materialsproject/pymatgen
pymatgen/io/abinit/abiobjects.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/abiobjects.py#L667-L683
def gamma_centered(cls, kpts=(1, 1, 1), use_symmetries=True, use_time_reversal=True): """ Convenient static constructor for an automatic Gamma centered Kpoint grid. Args: kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors. use_symmetries: False if spatial symmetries should not be used to reduce the number of independent k-points. use_time_reversal: False if time-reversal symmetry should not be used to reduce the number of independent k-points. Returns: :class:`KSampling` object. """ return cls(kpts=[kpts], kpt_shifts=(0.0, 0.0, 0.0), use_symmetries=use_symmetries, use_time_reversal=use_time_reversal, comment="gamma-centered mode")
[ "def", "gamma_centered", "(", "cls", ",", "kpts", "=", "(", "1", ",", "1", ",", "1", ")", ",", "use_symmetries", "=", "True", ",", "use_time_reversal", "=", "True", ")", ":", "return", "cls", "(", "kpts", "=", "[", "kpts", "]", ",", "kpt_shifts", "=", "(", "0.0", ",", "0.0", ",", "0.0", ")", ",", "use_symmetries", "=", "use_symmetries", ",", "use_time_reversal", "=", "use_time_reversal", ",", "comment", "=", "\"gamma-centered mode\"", ")" ]
Convenient static constructor for an automatic Gamma centered Kpoint grid. Args: kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors. use_symmetries: False if spatial symmetries should not be used to reduce the number of independent k-points. use_time_reversal: False if time-reversal symmetry should not be used to reduce the number of independent k-points. Returns: :class:`KSampling` object.
[ "Convenient", "static", "constructor", "for", "an", "automatic", "Gamma", "centered", "Kpoint", "grid", "." ]
python
train
47.529412
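A hedged usage sketch; KSampling is the class this constructor belongs to (named in the docstring, module taken from the record's path), and the subdivision counts are placeholders.

from pymatgen.io.abinit.abiobjects import KSampling

# 4x4x4 Gamma-centered grid with spatial symmetries enabled.
ksamp = KSampling.gamma_centered(kpts=(4, 4, 4), use_symmetries=True)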
softlayer/softlayer-python
SoftLayer/managers/cdn.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/cdn.py#L93-L101
def remove_origin(self, account_id, origin_id): """Removes an origin pull mapping with the given origin pull ID. :param int account_id: the CDN account ID from which the mapping should be deleted. :param int origin_id: the origin pull mapping ID to delete. """ return self.account.deleteOriginPullRule(origin_id, id=account_id)
[ "def", "remove_origin", "(", "self", ",", "account_id", ",", "origin_id", ")", ":", "return", "self", ".", "account", ".", "deleteOriginPullRule", "(", "origin_id", ",", "id", "=", "account_id", ")" ]
Removes an origin pull mapping with the given origin pull ID. :param int account_id: the CDN account ID from which the mapping should be deleted. :param int origin_id: the origin pull mapping ID to delete.
[ "Removes", "an", "origin", "pull", "mapping", "with", "the", "given", "origin", "pull", "ID", "." ]
python
train
43.555556
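A hedged sketch of calling the manager method above; create_client_from_env and CDNManager exist in the SoftLayer package, while both IDs are placeholders.

import SoftLayer

client = SoftLayer.create_client_from_env()
cdn = SoftLayer.CDNManager(client)
cdn.remove_origin(account_id=12345, origin_id=67890)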
Jajcus/pyxmpp2
pyxmpp2/roster.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/roster.py#L539-L548
def groups(self): """Set of groups defined in the roster. :Return: the groups :ReturnType: `set` of `unicode` """ groups = set() for item in self._items: groups |= item.groups return groups
[ "def", "groups", "(", "self", ")", ":", "groups", "=", "set", "(", ")", "for", "item", "in", "self", ".", "_items", ":", "groups", "|=", "item", ".", "groups", "return", "groups" ]
Set of groups defined in the roster. :Return: the groups :ReturnType: `set` of `unicode`
[ "Set", "of", "groups", "defined", "in", "the", "roster", "." ]
python
valid
24.9
googlefonts/ufo2ft
Lib/ufo2ft/outlineCompiler.py
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/outlineCompiler.py#L398-L450
def setupTable_cmap(self): """ Make the cmap table. **This should not be called externally.** Subclasses may override or supplement this method to handle the table creation in a different way if desired. """ if "cmap" not in self.tables: return from fontTools.ttLib.tables._c_m_a_p import cmap_format_4 nonBMP = dict((k,v) for k,v in self.unicodeToGlyphNameMapping.items() if k > 65535) if nonBMP: mapping = dict((k,v) for k,v in self.unicodeToGlyphNameMapping.items() if k <= 65535) else: mapping = dict(self.unicodeToGlyphNameMapping) # mac cmap4_0_3 = cmap_format_4(4) cmap4_0_3.platformID = 0 cmap4_0_3.platEncID = 3 cmap4_0_3.language = 0 cmap4_0_3.cmap = mapping # windows cmap4_3_1 = cmap_format_4(4) cmap4_3_1.platformID = 3 cmap4_3_1.platEncID = 1 cmap4_3_1.language = 0 cmap4_3_1.cmap = mapping # store self.otf["cmap"] = cmap = newTable("cmap") cmap.tableVersion = 0 cmap.tables = [cmap4_0_3, cmap4_3_1] # If we have glyphs outside Unicode BMP, we must set another # subtable that can hold longer codepoints for them. if nonBMP: from fontTools.ttLib.tables._c_m_a_p import cmap_format_12 nonBMP.update(mapping) # mac cmap12_0_4 = cmap_format_12(12) cmap12_0_4.platformID = 0 cmap12_0_4.platEncID = 4 cmap12_0_4.language = 0 cmap12_0_4.cmap = nonBMP # windows cmap12_3_10 = cmap_format_12(12) cmap12_3_10.platformID = 3 cmap12_3_10.platEncID = 10 cmap12_3_10.language = 0 cmap12_3_10.cmap = nonBMP # update tables registry cmap.tables = [cmap4_0_3, cmap4_3_1, cmap12_0_4, cmap12_3_10]
[ "def", "setupTable_cmap", "(", "self", ")", ":", "if", "\"cmap\"", "not", "in", "self", ".", "tables", ":", "return", "from", "fontTools", ".", "ttLib", ".", "tables", ".", "_c_m_a_p", "import", "cmap_format_4", "nonBMP", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "self", ".", "unicodeToGlyphNameMapping", ".", "items", "(", ")", "if", "k", ">", "65535", ")", "if", "nonBMP", ":", "mapping", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "self", ".", "unicodeToGlyphNameMapping", ".", "items", "(", ")", "if", "k", "<=", "65535", ")", "else", ":", "mapping", "=", "dict", "(", "self", ".", "unicodeToGlyphNameMapping", ")", "# mac", "cmap4_0_3", "=", "cmap_format_4", "(", "4", ")", "cmap4_0_3", ".", "platformID", "=", "0", "cmap4_0_3", ".", "platEncID", "=", "3", "cmap4_0_3", ".", "language", "=", "0", "cmap4_0_3", ".", "cmap", "=", "mapping", "# windows", "cmap4_3_1", "=", "cmap_format_4", "(", "4", ")", "cmap4_3_1", ".", "platformID", "=", "3", "cmap4_3_1", ".", "platEncID", "=", "1", "cmap4_3_1", ".", "language", "=", "0", "cmap4_3_1", ".", "cmap", "=", "mapping", "# store", "self", ".", "otf", "[", "\"cmap\"", "]", "=", "cmap", "=", "newTable", "(", "\"cmap\"", ")", "cmap", ".", "tableVersion", "=", "0", "cmap", ".", "tables", "=", "[", "cmap4_0_3", ",", "cmap4_3_1", "]", "# If we have glyphs outside Unicode BMP, we must set another", "# subtable that can hold longer codepoints for them.", "if", "nonBMP", ":", "from", "fontTools", ".", "ttLib", ".", "tables", ".", "_c_m_a_p", "import", "cmap_format_12", "nonBMP", ".", "update", "(", "mapping", ")", "# mac", "cmap12_0_4", "=", "cmap_format_12", "(", "12", ")", "cmap12_0_4", ".", "platformID", "=", "0", "cmap12_0_4", ".", "platEncID", "=", "4", "cmap12_0_4", ".", "language", "=", "0", "cmap12_0_4", ".", "cmap", "=", "nonBMP", "# windows", "cmap12_3_10", "=", "cmap_format_12", "(", "12", ")", "cmap12_3_10", ".", "platformID", "=", "3", "cmap12_3_10", ".", "platEncID", "=", "10", "cmap12_3_10", ".", "language", "=", "0", "cmap12_3_10", ".", "cmap", "=", "nonBMP", "# update tables registry", "cmap", ".", "tables", "=", "[", "cmap4_0_3", ",", "cmap4_3_1", ",", "cmap12_0_4", ",", "cmap12_3_10", "]" ]
Make the cmap table. **This should not be called externally.** Subclasses may override or supplement this method to handle the table creation in a different way if desired.
[ "Make", "the", "cmap", "table", "." ]
python
train
36.09434
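A standalone illustration of the BMP split performed above: codepoints beyond 0xFFFF cannot live in a format 4 subtable, so their presence triggers the extra format 12 subtables.

mapping = {0x41: 'A', 0x1F600: 'uni1F600'}
non_bmp = {k: v for k, v in mapping.items() if k > 0xFFFF}
bmp = {k: v for k, v in mapping.items() if k <= 0xFFFF}
assert bmp == {0x41: 'A'}
assert non_bmp == {0x1F600: 'uni1F600'}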
Iotic-Labs/py-IoticAgent
src/IoticAgent/IOT/Thing.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/Thing.py#L704-L732
def _cb_created(self, payload, duplicated): """Indirect callback (via Client) for point & subscription creation responses""" if payload[P_RESOURCE] in _POINT_TYPE_TO_CLASS: store = self.__new_feeds if payload[P_RESOURCE] == R_FEED else self.__new_controls cls = _POINT_TYPE_TO_CLASS[payload[P_RESOURCE]] with store: store[payload[P_LID]] = cls(self._client, payload[P_ENTITY_LID], payload[P_LID], payload[P_ID]) logger.debug('Added %s: %s (for %s)', foc_to_str(payload[P_RESOURCE]), payload[P_LID], payload[P_ENTITY_LID]) elif payload[P_RESOURCE] == R_SUB: # local if P_POINT_ENTITY_LID in payload: key = (payload[P_POINT_TYPE], (payload[P_POINT_ENTITY_LID], payload[P_POINT_LID])) # global else: key = (payload[P_POINT_TYPE], payload[P_POINT_ID]) new_subs = self.__new_subs with new_subs: if key in new_subs: cls = RemoteFeed if payload[P_POINT_TYPE] == R_FEED else RemoteControl new_subs[key] = cls(self._client, payload[P_ID], payload[P_POINT_ID], payload[P_ENTITY_LID]) else: logger.warning('Ignoring subscription creation for unexpected %s: %s', foc_to_str(payload[P_POINT_TYPE]), key[1]) else: logger.error('Resource creation of type %d unhandled', payload[P_RESOURCE])
[ "def", "_cb_created", "(", "self", ",", "payload", ",", "duplicated", ")", ":", "if", "payload", "[", "P_RESOURCE", "]", "in", "_POINT_TYPE_TO_CLASS", ":", "store", "=", "self", ".", "__new_feeds", "if", "payload", "[", "P_RESOURCE", "]", "==", "R_FEED", "else", "self", ".", "__new_controls", "cls", "=", "_POINT_TYPE_TO_CLASS", "[", "payload", "[", "P_RESOURCE", "]", "]", "with", "store", ":", "store", "[", "payload", "[", "P_LID", "]", "]", "=", "cls", "(", "self", ".", "_client", ",", "payload", "[", "P_ENTITY_LID", "]", ",", "payload", "[", "P_LID", "]", ",", "payload", "[", "P_ID", "]", ")", "logger", ".", "debug", "(", "'Added %s: %s (for %s)'", ",", "foc_to_str", "(", "payload", "[", "P_RESOURCE", "]", ")", ",", "payload", "[", "P_LID", "]", ",", "payload", "[", "P_ENTITY_LID", "]", ")", "elif", "payload", "[", "P_RESOURCE", "]", "==", "R_SUB", ":", "# local", "if", "P_POINT_ENTITY_LID", "in", "payload", ":", "key", "=", "(", "payload", "[", "P_POINT_TYPE", "]", ",", "(", "payload", "[", "P_POINT_ENTITY_LID", "]", ",", "payload", "[", "P_POINT_LID", "]", ")", ")", "# global", "else", ":", "key", "=", "(", "payload", "[", "P_POINT_TYPE", "]", ",", "payload", "[", "P_POINT_ID", "]", ")", "new_subs", "=", "self", ".", "__new_subs", "with", "new_subs", ":", "if", "key", "in", "new_subs", ":", "cls", "=", "RemoteFeed", "if", "payload", "[", "P_POINT_TYPE", "]", "==", "R_FEED", "else", "RemoteControl", "new_subs", "[", "key", "]", "=", "cls", "(", "self", ".", "_client", ",", "payload", "[", "P_ID", "]", ",", "payload", "[", "P_POINT_ID", "]", ",", "payload", "[", "P_ENTITY_LID", "]", ")", "else", ":", "logger", ".", "warning", "(", "'Ignoring subscription creation for unexpected %s: %s'", ",", "foc_to_str", "(", "payload", "[", "P_POINT_TYPE", "]", ")", ",", "key", "[", "1", "]", ")", "else", ":", "logger", ".", "error", "(", "'Resource creation of type %d unhandled'", ",", "payload", "[", "P_RESOURCE", "]", ")" ]
Indirect callback (via Client) for point & subscription creation responses
[ "Indirect", "callback", "(", "via", "Client", ")", "for", "point", "&", "subscription", "creation", "responses" ]
python
train
52.206897
hootnot/postcode-api-wrapper
postcodepy/typedefs.py
https://github.com/hootnot/postcode-api-wrapper/blob/42359cb9402f84a06f7d58f889f1156d653f5ea9/postcodepy/typedefs.py#L55-L72
def translate_addresstype(f): """decorator to translate the addressType field. translate the value of the addressType field of the API response into a translated type. """ @wraps(f) def wr(r, pc): at = r["addressType"] try: r.update({"addressType": POSTCODE_API_TYPEDEFS_ADDRESS_TYPES[at]}) except: logger.warning("Warning: {}: " "unknown 'addressType': {}".format(pc, at)) return f(r, pc) return wr
[ "def", "translate_addresstype", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wr", "(", "r", ",", "pc", ")", ":", "at", "=", "r", "[", "\"addressType\"", "]", "try", ":", "r", ".", "update", "(", "{", "\"addressType\"", ":", "POSTCODE_API_TYPEDEFS_ADDRESS_TYPES", "[", "at", "]", "}", ")", "except", ":", "logger", ".", "warning", "(", "\"Warning: {}: \"", "\"unknown 'addressType': {}\"", ".", "format", "(", "pc", ",", "at", ")", ")", "return", "f", "(", "r", ",", "pc", ")", "return", "wr" ]
decorator to translate the addressType field. translate the value of the addressType field of the API response into a translated type.
[ "decorator", "to", "translate", "the", "addressType", "field", "." ]
python
train
27.555556
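A hedged sketch of how the decorator composes; the wrapped handler below is hypothetical, only translate_addresstype and its (response, postcode) call shape come from the record.

@translate_addresstype
def handle_response(r, pc):
    # r["addressType"] has already been mapped (or a warning logged)
    # by the time this body runs.
    return r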
Scifabric/pbs
pbs.py
https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/pbs.py#L202-L214
def update_task_redundancy(config, task_id, redundancy):
    """Update task redundancy for a project."""
    if task_id is None:
        msg = ("Are you sure you want to update all the tasks redundancy?")
        if click.confirm(msg):
            res = _update_tasks_redundancy(config, task_id, redundancy)
            click.echo(res)
        else:
            click.echo("Aborting.")
    else:
        res = _update_tasks_redundancy(config, task_id, redundancy)
        click.echo(res)
[ "def", "update_task_redundancy", "(", "config", ",", "task_id", ",", "redundancy", ")", ":", "if", "task_id", "is", "None", ":", "msg", "=", "(", "\"Are you sure you want to update all the tasks redundancy?\"", ")", "if", "click", ".", "confirm", "(", "msg", ")", ":", "res", "=", "_update_tasks_redundancy", "(", "config", ",", "task_id", ",", "redundancy", ")", "click", ".", "echo", "(", "res", ")", "else", ":", "click", ".", "echo", "(", "\"Aborting.\"", ")", "else", ":", "res", "=", "_update_tasks_redundancy", "(", "config", ",", "task_id", ",", "redundancy", ")", "click", ".", "echo", "(", "res", ")" ]
Update task redundancy for a project.
[ "Update", "task", "redudancy", "for", "a", "project", "." ]
python
train
36.538462
UDST/orca
orca/orca.py
https://github.com/UDST/orca/blob/07b34aeef13cc87c966b2e30cbe7e76cc9d3622c/orca/orca.py#L239-L253
def update_col(self, column_name, series): """ Add or replace a column in the underlying DataFrame. Parameters ---------- column_name : str Column to add or replace. series : pandas.Series or sequence Column data. """ logger.debug('updating column {!r} in table {!r}'.format( column_name, self.name)) self.local[column_name] = series
[ "def", "update_col", "(", "self", ",", "column_name", ",", "series", ")", ":", "logger", ".", "debug", "(", "'updating column {!r} in table {!r}'", ".", "format", "(", "column_name", ",", "self", ".", "name", ")", ")", "self", ".", "local", "[", "column_name", "]", "=", "series" ]
Add or replace a column in the underlying DataFrame. Parameters ---------- column_name : str Column to add or replace. series : pandas.Series or sequence Column data.
[ "Add", "or", "replace", "a", "column", "in", "the", "underlying", "DataFrame", "." ]
python
train
28.6
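A hedged end-to-end sketch around the method above, using orca's public add_table/get_table registry; the table and column names are placeholders.

import orca
import pandas as pd

orca.add_table('things', pd.DataFrame({'x': [1, 2, 3]}))
wrapper = orca.get_table('things')
wrapper.update_col('y', pd.Series([10, 20, 30]))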
petl-developers/petl
petl/io/pickle.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/io/pickle.py#L100-L112
def appendpickle(table, source=None, protocol=-1, write_header=False): """ Append data to an existing pickle file. I.e., as :func:`petl.io.pickle.topickle` but the file is opened in append mode. Note that no attempt is made to check that the fields or row lengths are consistent with the existing data, the data rows from the table are simply appended to the file. """ _writepickle(table, source=source, mode='ab', protocol=protocol, write_header=write_header)
[ "def", "appendpickle", "(", "table", ",", "source", "=", "None", ",", "protocol", "=", "-", "1", ",", "write_header", "=", "False", ")", ":", "_writepickle", "(", "table", ",", "source", "=", "source", ",", "mode", "=", "'ab'", ",", "protocol", "=", "protocol", ",", "write_header", "=", "write_header", ")" ]
Append data to an existing pickle file. I.e., as :func:`petl.io.pickle.topickle` but the file is opened in append mode. Note that no attempt is made to check that the fields or row lengths are consistent with the existing data, the data rows from the table are simply appended to the file.
[ "Append", "data", "to", "an", "existing", "pickle", "file", ".", "I", ".", "e", ".", "as", ":", "func", ":", "petl", ".", "io", ".", "pickle", ".", "topickle", "but", "the", "file", "is", "opened", "in", "append", "mode", "." ]
python
train
38.461538
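A minimal sketch pairing appendpickle with its counterpart topickle, as the docstring above suggests; both are exposed at the top of the petl namespace.

import petl as etl

t1 = [('foo', 'bar'), ('a', 1)]
t2 = [('foo', 'bar'), ('b', 2)]
etl.topickle(t1, 'example.p')       # create the file, header included
etl.appendpickle(t2, 'example.p')   # append data rows, header not rewritten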
annoviko/pyclustering
pyclustering/container/cftree.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/container/cftree.py#L1219-L1240
def show_feature_destibution(self, data = None):
    """!
    @brief Shows feature distribution.
    @details Only features in 1D, 2D, 3D space can be visualized.

    @param[in] data (list): List of points that will be used for visualization; if it is not specified, then only the features will be displayed.

    """
    visualizer = cluster_visualizer();

    print("amount of nodes: ", self.__amount_nodes);

    if (data is not None):
        visualizer.append_cluster(data, marker = 'x');

    for level in range(0, self.height):
        level_nodes = self.get_level_nodes(level);

        centers = [ node.feature.get_centroid() for node in level_nodes ];
        visualizer.append_cluster(centers, None, markersize = (self.height - level + 1) * 5);

    visualizer.show();
[ "def", "show_feature_destibution", "(", "self", ",", "data", "=", "None", ")", ":", "visualizer", "=", "cluster_visualizer", "(", ")", "print", "(", "\"amount of nodes: \"", ",", "self", ".", "__amount_nodes", ")", "if", "(", "data", "is", "not", "None", ")", ":", "visualizer", ".", "append_cluster", "(", "data", ",", "marker", "=", "'x'", ")", "for", "level", "in", "range", "(", "0", ",", "self", ".", "height", ")", ":", "level_nodes", "=", "self", ".", "get_level_nodes", "(", "level", ")", "centers", "=", "[", "node", ".", "feature", ".", "get_centroid", "(", ")", "for", "node", "in", "level_nodes", "]", "visualizer", ".", "append_cluster", "(", "centers", ",", "None", ",", "markersize", "=", "(", "self", ".", "height", "-", "level", "+", "1", ")", "*", "5", ")", "visualizer", ".", "show", "(", ")" ]
! @brief Shows feature distribution. @details Only features in 1D, 2D, 3D space can be visualized. @param[in] data (list): List of points that will be used for visualization; if it is not specified, then only the features will be displayed.
[ "!" ]
python
valid
40.318182
progrium/skypipe
skypipe/cloud.py
https://github.com/progrium/skypipe/blob/6162610a1876282ff1cc8eeca6c8669b8f605482/skypipe/cloud.py#L56-L62
def lookup_endpoint(cli): """Looks up the application endpoint from dotcloud""" url = '/applications/{0}/environment'.format(APPNAME) environ = cli.user.get(url).item port = environ['DOTCLOUD_SATELLITE_ZMQ_PORT'] host = socket.gethostbyname(environ['DOTCLOUD_SATELLITE_ZMQ_HOST']) return "tcp://{0}:{1}".format(host, port)
[ "def", "lookup_endpoint", "(", "cli", ")", ":", "url", "=", "'/applications/{0}/environment'", ".", "format", "(", "APPNAME", ")", "environ", "=", "cli", ".", "user", ".", "get", "(", "url", ")", ".", "item", "port", "=", "environ", "[", "'DOTCLOUD_SATELLITE_ZMQ_PORT'", "]", "host", "=", "socket", ".", "gethostbyname", "(", "environ", "[", "'DOTCLOUD_SATELLITE_ZMQ_HOST'", "]", ")", "return", "\"tcp://{0}:{1}\"", ".", "format", "(", "host", ",", "port", ")" ]
Looks up the application endpoint from dotcloud
[ "Looks", "up", "the", "application", "endpoint", "from", "dotcloud" ]
python
train
48.571429
log2timeline/dfvfs
dfvfs/compression/zlib_decompressor.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/compression/zlib_decompressor.py#L35-L57
def Decompress(self, compressed_data): """Decompresses the compressed data. Args: compressed_data (bytes): compressed data. Returns: tuple(bytes, bytes): uncompressed data and remaining compressed data. Raises: BackEndError: if the zlib compressed stream cannot be decompressed. """ try: uncompressed_data = self._zlib_decompressor.decompress(compressed_data) remaining_compressed_data = getattr( self._zlib_decompressor, 'unused_data', b'') except zlib.error as exception: raise errors.BackEndError(( 'Unable to decompress zlib compressed stream with error: ' '{0!s}.').format(exception)) return uncompressed_data, remaining_compressed_data
[ "def", "Decompress", "(", "self", ",", "compressed_data", ")", ":", "try", ":", "uncompressed_data", "=", "self", ".", "_zlib_decompressor", ".", "decompress", "(", "compressed_data", ")", "remaining_compressed_data", "=", "getattr", "(", "self", ".", "_zlib_decompressor", ",", "'unused_data'", ",", "b''", ")", "except", "zlib", ".", "error", "as", "exception", ":", "raise", "errors", ".", "BackEndError", "(", "(", "'Unable to decompress zlib compressed stream with error: '", "'{0!s}.'", ")", ".", "format", "(", "exception", ")", ")", "return", "uncompressed_data", ",", "remaining_compressed_data" ]
Decompresses the compressed data. Args: compressed_data (bytes): compressed data. Returns: tuple(bytes, bytes): uncompressed data and remaining compressed data. Raises: BackEndError: if the zlib compressed stream cannot be decompressed.
[ "Decompresses", "the", "compressed", "data", "." ]
python
train
31.391304
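A standalone illustration of the zlib pattern the method wraps: decompress() plus the decompressor's unused_data attribute, which collects bytes past the end of the compressed stream.

import zlib

payload = zlib.compress(b'hello world') + b'trailing bytes'
d = zlib.decompressobj()
assert d.decompress(payload) == b'hello world'
assert d.unused_data == b'trailing bytes'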
inspirehep/inspire-schemas
inspire_schemas/builders/literature.py
https://github.com/inspirehep/inspire-schemas/blob/34bc124b62fba565b6b40d1a3c15103a23a05edb/inspire_schemas/builders/literature.py#L319-L333
def add_book_series(self, title, volume=None): """ :param volume: the volume of the book :type volume: string :param title: the title of the book :type title: string """ book_series = {} if title is not None: book_series['title'] = title if volume is not None: book_series['volume'] = volume self._append_to('book_series', book_series)
[ "def", "add_book_series", "(", "self", ",", "title", ",", "volume", "=", "None", ")", ":", "book_series", "=", "{", "}", "if", "title", "is", "not", "None", ":", "book_series", "[", "'title'", "]", "=", "title", "if", "volume", "is", "not", "None", ":", "book_series", "[", "'volume'", "]", "=", "volume", "self", ".", "_append_to", "(", "'book_series'", ",", "book_series", ")" ]
:param volume: the volume of the book :type volume: string :param title: the title of the book :type title: string
[ ":", "param", "volume", ":", "the", "volume", "of", "the", "book", ":", "type", "volume", ":", "string" ]
python
train
28.466667
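A hedged usage sketch, assuming `builder` is a LiteratureBuilder instance (the class implied by the record's path); the title and volume values are placeholders.

builder.add_book_series('Lecture Notes in Physics', volume='42')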
consbio/gis-metadata-parser
gis_metadata/utils.py
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/utils.py#L278-L296
def parse_complex(tree_to_parse, xpath_root, xpath_map, complex_key): """ Creates and returns a Dictionary data structure parsed from the metadata. :param tree_to_parse: the XML tree compatible with element_utils to be parsed :param xpath_root: the XPATH location of the structure inside the parent element :param xpath_map: a dict of XPATHs corresponding to a complex definition :param complex_key: indicates which complex definition describes the structure """ complex_struct = {} for prop in _complex_definitions.get(complex_key, xpath_map): # Normalize complex values: treat values with newlines like values from separate elements parsed = parse_property(tree_to_parse, xpath_root, xpath_map, prop) parsed = reduce_value(flatten_items(v.split(_COMPLEX_DELIM) for v in wrap_value(parsed))) complex_struct[prop] = get_default_for_complex_sub(complex_key, prop, parsed, xpath_map[prop]) return complex_struct if any(complex_struct.values()) else {}
[ "def", "parse_complex", "(", "tree_to_parse", ",", "xpath_root", ",", "xpath_map", ",", "complex_key", ")", ":", "complex_struct", "=", "{", "}", "for", "prop", "in", "_complex_definitions", ".", "get", "(", "complex_key", ",", "xpath_map", ")", ":", "# Normalize complex values: treat values with newlines like values from separate elements", "parsed", "=", "parse_property", "(", "tree_to_parse", ",", "xpath_root", ",", "xpath_map", ",", "prop", ")", "parsed", "=", "reduce_value", "(", "flatten_items", "(", "v", ".", "split", "(", "_COMPLEX_DELIM", ")", "for", "v", "in", "wrap_value", "(", "parsed", ")", ")", ")", "complex_struct", "[", "prop", "]", "=", "get_default_for_complex_sub", "(", "complex_key", ",", "prop", ",", "parsed", ",", "xpath_map", "[", "prop", "]", ")", "return", "complex_struct", "if", "any", "(", "complex_struct", ".", "values", "(", ")", ")", "else", "{", "}" ]
Creates and returns a Dictionary data structure parsed from the metadata. :param tree_to_parse: the XML tree compatible with element_utils to be parsed :param xpath_root: the XPATH location of the structure inside the parent element :param xpath_map: a dict of XPATHs corresponding to a complex definition :param complex_key: indicates which complex definition describes the structure
[ "Creates", "and", "returns", "a", "Dictionary", "data", "structure", "parsed", "from", "the", "metadata", ".", ":", "param", "tree_to_parse", ":", "the", "XML", "tree", "compatible", "with", "element_utils", "to", "be", "parsed", ":", "param", "xpath_root", ":", "the", "XPATH", "location", "of", "the", "structure", "inside", "the", "parent", "element", ":", "param", "xpath_map", ":", "a", "dict", "of", "XPATHs", "corresponding", "to", "a", "complex", "definition", ":", "param", "complex_key", ":", "indicates", "which", "complex", "definition", "describes", "the", "structure" ]
python
train
53
openstack/horizon
horizon/base.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/base.py#L937-L988
def _process_panel_configuration(self, config): """Add, remove and set default panels on the dashboard.""" try: dashboard = config.get('PANEL_DASHBOARD') if not dashboard: LOG.warning("Skipping %s because it doesn't have " "PANEL_DASHBOARD defined.", config.__name__) return panel_slug = config.get('PANEL') dashboard_cls = self.get_dashboard(dashboard) panel_group = config.get('PANEL_GROUP') default_panel = config.get('DEFAULT_PANEL') # Set the default panel if default_panel: dashboard_cls.default_panel = default_panel # Remove the panel if config.get('REMOVE_PANEL', False): for panel in dashboard_cls.get_panels(): if panel_slug == panel.slug: dashboard_cls.unregister(panel.__class__) elif config.get('ADD_PANEL', None): # Add the panel to the dashboard panel_path = config['ADD_PANEL'] mod_path, panel_cls = panel_path.rsplit(".", 1) try: mod = import_module(mod_path) except ImportError as e: LOG.warning("Could not import panel module %(module)s: " "%(exc)s", {'module': mod_path, 'exc': e}) return panel = getattr(mod, panel_cls) # test is can_register method is present and call method if # it is to determine if the panel should be loaded if hasattr(panel, 'can_register') and \ callable(getattr(panel, 'can_register')): if not panel.can_register(): LOG.debug("Load condition failed for panel: %(panel)s", {'panel': panel_slug}) return dashboard_cls.register(panel) if panel_group: dashboard_cls.get_panel_group(panel_group).\ panels.append(panel.slug) else: panels = list(dashboard_cls.panels) panels.append(panel) dashboard_cls.panels = tuple(panels) except Exception as e: LOG.warning('Could not process panel %(panel)s: %(exc)s', {'panel': panel_slug, 'exc': e})
[ "def", "_process_panel_configuration", "(", "self", ",", "config", ")", ":", "try", ":", "dashboard", "=", "config", ".", "get", "(", "'PANEL_DASHBOARD'", ")", "if", "not", "dashboard", ":", "LOG", ".", "warning", "(", "\"Skipping %s because it doesn't have \"", "\"PANEL_DASHBOARD defined.\"", ",", "config", ".", "__name__", ")", "return", "panel_slug", "=", "config", ".", "get", "(", "'PANEL'", ")", "dashboard_cls", "=", "self", ".", "get_dashboard", "(", "dashboard", ")", "panel_group", "=", "config", ".", "get", "(", "'PANEL_GROUP'", ")", "default_panel", "=", "config", ".", "get", "(", "'DEFAULT_PANEL'", ")", "# Set the default panel", "if", "default_panel", ":", "dashboard_cls", ".", "default_panel", "=", "default_panel", "# Remove the panel", "if", "config", ".", "get", "(", "'REMOVE_PANEL'", ",", "False", ")", ":", "for", "panel", "in", "dashboard_cls", ".", "get_panels", "(", ")", ":", "if", "panel_slug", "==", "panel", ".", "slug", ":", "dashboard_cls", ".", "unregister", "(", "panel", ".", "__class__", ")", "elif", "config", ".", "get", "(", "'ADD_PANEL'", ",", "None", ")", ":", "# Add the panel to the dashboard", "panel_path", "=", "config", "[", "'ADD_PANEL'", "]", "mod_path", ",", "panel_cls", "=", "panel_path", ".", "rsplit", "(", "\".\"", ",", "1", ")", "try", ":", "mod", "=", "import_module", "(", "mod_path", ")", "except", "ImportError", "as", "e", ":", "LOG", ".", "warning", "(", "\"Could not import panel module %(module)s: \"", "\"%(exc)s\"", ",", "{", "'module'", ":", "mod_path", ",", "'exc'", ":", "e", "}", ")", "return", "panel", "=", "getattr", "(", "mod", ",", "panel_cls", ")", "# test is can_register method is present and call method if", "# it is to determine if the panel should be loaded", "if", "hasattr", "(", "panel", ",", "'can_register'", ")", "and", "callable", "(", "getattr", "(", "panel", ",", "'can_register'", ")", ")", ":", "if", "not", "panel", ".", "can_register", "(", ")", ":", "LOG", ".", "debug", "(", "\"Load condition failed for panel: %(panel)s\"", ",", "{", "'panel'", ":", "panel_slug", "}", ")", "return", "dashboard_cls", ".", "register", "(", "panel", ")", "if", "panel_group", ":", "dashboard_cls", ".", "get_panel_group", "(", "panel_group", ")", ".", "panels", ".", "append", "(", "panel", ".", "slug", ")", "else", ":", "panels", "=", "list", "(", "dashboard_cls", ".", "panels", ")", "panels", ".", "append", "(", "panel", ")", "dashboard_cls", ".", "panels", "=", "tuple", "(", "panels", ")", "except", "Exception", "as", "e", ":", "LOG", ".", "warning", "(", "'Could not process panel %(panel)s: %(exc)s'", ",", "{", "'panel'", ":", "panel_slug", ",", "'exc'", ":", "e", "}", ")" ]
Add, remove and set default panels on the dashboard.
[ "Add", "remove", "and", "set", "default", "panels", "on", "the", "dashboard", "." ]
python
train
47.461538
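A hedged sketch of the pluggable-settings shape the method above consumes; the attribute names mirror the config.get(...) calls in the record, the values are placeholders.

# Typical 'enabled' file contents for adding a panel to a dashboard.
PANEL = 'mypanel'
PANEL_DASHBOARD = 'project'
PANEL_GROUP = 'default'
ADD_PANEL = 'myplugin.content.mypanel.panel.MyPanel'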
rwl/pylon
pyreto/renderer.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/renderer.py#L53-L75
def updateData(self, data): """ Updates the data used by the renderer. """ # pylab.ion() fig = pylab.figure(1) n_agent = len(data) idx = 1 for i, adata in enumerate(data): saxis = fig.add_subplot(3, n_agent, i + 1) saxis.plot(adata[0]) idx += 1 aaxis = fig.add_subplot(3, n_agent, i + 1 + n_agent) aaxis.plot(adata[1]) idx += 1 raxis = fig.add_subplot(3, n_agent, i + 1 + (n_agent * 2)) raxis.plot(adata[2]) idx += 1 pylab.show()
[ "def", "updateData", "(", "self", ",", "data", ")", ":", "# pylab.ion()", "fig", "=", "pylab", ".", "figure", "(", "1", ")", "n_agent", "=", "len", "(", "data", ")", "idx", "=", "1", "for", "i", ",", "adata", "in", "enumerate", "(", "data", ")", ":", "saxis", "=", "fig", ".", "add_subplot", "(", "3", ",", "n_agent", ",", "i", "+", "1", ")", "saxis", ".", "plot", "(", "adata", "[", "0", "]", ")", "idx", "+=", "1", "aaxis", "=", "fig", ".", "add_subplot", "(", "3", ",", "n_agent", ",", "i", "+", "1", "+", "n_agent", ")", "aaxis", ".", "plot", "(", "adata", "[", "1", "]", ")", "idx", "+=", "1", "raxis", "=", "fig", ".", "add_subplot", "(", "3", ",", "n_agent", ",", "i", "+", "1", "+", "(", "n_agent", "*", "2", ")", ")", "raxis", ".", "plot", "(", "adata", "[", "2", "]", ")", "idx", "+=", "1", "pylab", ".", "show", "(", ")" ]
Updates the data used by the renderer.
[ "Updates", "the", "data", "used", "by", "the", "renderer", "." ]
python
train
25.347826
lago-project/lago
lago/prefix.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/prefix.py#L1501-L1512
def is_prefix(cls, path): """ Check if a path is a valid prefix Args: path(str): path to be checked Returns: bool: True if the given path is a prefix """ lagofile = paths.Paths(path).prefix_lagofile() return os.path.isfile(lagofile)
[ "def", "is_prefix", "(", "cls", ",", "path", ")", ":", "lagofile", "=", "paths", ".", "Paths", "(", "path", ")", ".", "prefix_lagofile", "(", ")", "return", "os", ".", "path", ".", "isfile", "(", "lagofile", ")" ]
Check if a path is a valid prefix Args: path(str): path to be checked Returns: bool: True if the given path is a prefix
[ "Check", "if", "a", "path", "is", "a", "valid", "prefix" ]
python
train
25.25
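A hedged usage sketch; is_prefix takes cls, i.e. it is a classmethod, so calls go through the defining class (Prefix, per the record's path lago/prefix.py). The path below is a placeholder.

from lago.prefix import Prefix

if Prefix.is_prefix('/var/lib/lago/default'):
    print('found a lago prefix')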
RJT1990/pyflux
pyflux/gpnarx/gpnarx.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/gpnarx/gpnarx.py#L442-L465
def predict(self, h=5): """ Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? Returns ---------- - pd.DataFrame with predicted values """ if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: predictions, _, _, _ = self._construct_predict(self.latent_variables.get_z_values(),h) predictions = predictions*self._norm_std + self._norm_mean date_index = self.shift_dates(h) result = pd.DataFrame(predictions) result.rename(columns={0:self.data_name}, inplace=True) result.index = date_index[-h:] return result
[ "def", "predict", "(", "self", ",", "h", "=", "5", ")", ":", "if", "self", ".", "latent_variables", ".", "estimated", "is", "False", ":", "raise", "Exception", "(", "\"No latent variables estimated!\"", ")", "else", ":", "predictions", ",", "_", ",", "_", ",", "_", "=", "self", ".", "_construct_predict", "(", "self", ".", "latent_variables", ".", "get_z_values", "(", ")", ",", "h", ")", "predictions", "=", "predictions", "*", "self", ".", "_norm_std", "+", "self", ".", "_norm_mean", "date_index", "=", "self", ".", "shift_dates", "(", "h", ")", "result", "=", "pd", ".", "DataFrame", "(", "predictions", ")", "result", ".", "rename", "(", "columns", "=", "{", "0", ":", "self", ".", "data_name", "}", ",", "inplace", "=", "True", ")", "result", ".", "index", "=", "date_index", "[", "-", "h", ":", "]", "return", "result" ]
Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? Returns ---------- - pd.DataFrame with predicted values
[ "Makes", "forecast", "with", "the", "estimated", "model", "Parameters", "----------", "h", ":", "int", "(", "default", ":", "5", ")", "How", "many", "steps", "ahead", "would", "you", "like", "to", "forecast?", "Returns", "----------", "-", "pd", ".", "DataFrame", "with", "predicted", "values" ]
python
train
34.666667
push-things/django-th
th_evernote/my_evernote.py
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/th_evernote/my_evernote.py#L289-L309
def callback(self, request, **kwargs): """ Called from the Service when the user accepts to activate it """ try: client = self.get_evernote_client() # finally we save the user auth token # As we already stored the object ServicesActivated # from the UserServiceCreateView now we update the same # object to the database so : # 1) we get the previous object us = UserService.objects.get(user=request.user, name=ServicesActivated.objects.get(name='ServiceEvernote')) # 2) then get the token us.token = client.get_access_token(request.session['oauth_token'], request.session['oauth_token_secret'], request.GET.get('oauth_verifier', '')) # 3) and save everything us.save() except KeyError: return '/' return 'evernote/callback.html'
[ "def", "callback", "(", "self", ",", "request", ",", "*", "*", "kwargs", ")", ":", "try", ":", "client", "=", "self", ".", "get_evernote_client", "(", ")", "# finally we save the user auth token", "# As we already stored the object ServicesActivated", "# from the UserServiceCreateView now we update the same", "# object to the database so :", "# 1) we get the previous object", "us", "=", "UserService", ".", "objects", ".", "get", "(", "user", "=", "request", ".", "user", ",", "name", "=", "ServicesActivated", ".", "objects", ".", "get", "(", "name", "=", "'ServiceEvernote'", ")", ")", "# 2) then get the token", "us", ".", "token", "=", "client", ".", "get_access_token", "(", "request", ".", "session", "[", "'oauth_token'", "]", ",", "request", ".", "session", "[", "'oauth_token_secret'", "]", ",", "request", ".", "GET", ".", "get", "(", "'oauth_verifier'", ",", "''", ")", ")", "# 3) and save everything", "us", ".", "save", "(", ")", "except", "KeyError", ":", "return", "'/'", "return", "'evernote/callback.html'" ]
Called from the Service when the user accepts to activate it
[ "Called", "from", "the", "Service", "when", "the", "user", "accept", "to", "activate", "it" ]
python
train
45.285714
devision-io/metasdk
metasdk/services/MetaqlService.py
https://github.com/devision-io/metasdk/blob/1a1af5ceeb8ade843fd656c9c27c8b9ff789fc68/metasdk/services/MetaqlService.py#L15-L26
def download_data(self, configuration, output_file): """ Executes the query specified in the configuration and serves the file for download :param configuration: Query configuration :param output_file: Destination path for the downloaded file :return: """ params = configuration response = self.__app.native_api_call('metaql', 'download-data', params, self.__options, False, None, True, http_path="/api/v1/meta/") with open(output_file, 'wb') as out_file: shutil.copyfileobj(response.raw, out_file) del response
[ "def", "download_data", "(", "self", ",", "configuration", ",", "output_file", ")", ":", "params", "=", "configuration", "response", "=", "self", ".", "__app", ".", "native_api_call", "(", "'metaql'", ",", "'download-data'", ",", "params", ",", "self", ".", "__options", ",", "False", ",", "None", ",", "True", ",", "http_path", "=", "\"/api/v1/meta/\"", ")", "with", "open", "(", "output_file", ",", "'wb'", ")", "as", "out_file", ":", "shutil", ".", "copyfileobj", "(", "response", ".", "raw", ",", "out_file", ")", "del", "response" ]
Executes the query specified in the configuration and serves the file for download :param configuration: Query configuration :param output_file: Destination path for the downloaded file :return:
[ "Выполняет", "указанный", "в", "конфигурации", "запрос", "и", "отдает", "файл", "на", "скачивание", ":", "param", "configuration", ":", "Конфгурация", "запроса", ":", "param", "output_file", ":", "Место", "куда", "надо", "скачать", "файл", ":", "return", ":" ]
python
train
47.333333
pallets/werkzeug
examples/manage-plnt.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/examples/manage-plnt.py#L18-L25
def make_app(): """Helper function that creates a plnt app.""" from plnt import Plnt database_uri = os.environ.get("PLNT_DATABASE_URI") app = Plnt(database_uri or "sqlite:////tmp/plnt.db") app.bind_to_context() return app
[ "def", "make_app", "(", ")", ":", "from", "plnt", "import", "Plnt", "database_uri", "=", "os", ".", "environ", ".", "get", "(", "\"PLNT_DATABASE_URI\"", ")", "app", "=", "Plnt", "(", "database_uri", "or", "\"sqlite:////tmp/plnt.db\"", ")", "app", ".", "bind_to_context", "(", ")", "return", "app" ]
Helper function that creates a plnt app.
[ "Helper", "function", "that", "creates", "a", "plnt", "app", "." ]
python
train
29.875
crdoconnor/commandlib
commandlib/command.py
https://github.com/crdoconnor/commandlib/blob/b630364fd7b0d189b388e22a7f43235d182e12e4/commandlib/command.py#L115-L129
def with_env(self, **environment_variables): """ Return new Command object that will be run with additional environment variables. Specify environment variables as follows: new_cmd = old_cmd.with_env(PYTHON_PATH=".", ENV_PORT="2022") """ new_env_vars = { str(var): str(val) for var, val in environment_variables.items() } new_command = copy.deepcopy(self) new_command._env.update(new_env_vars) return new_command
[ "def", "with_env", "(", "self", ",", "*", "*", "environment_variables", ")", ":", "new_env_vars", "=", "{", "str", "(", "var", ")", ":", "str", "(", "val", ")", "for", "var", ",", "val", "in", "environment_variables", ".", "items", "(", ")", "}", "new_command", "=", "copy", ".", "deepcopy", "(", "self", ")", "new_command", ".", "_env", ".", "update", "(", "new_env_vars", ")", "return", "new_command" ]
Return new Command object that will be run with additional environment variables. Specify environment variables as follows: new_cmd = old_cmd.with_env(PYTHON_PATH=".", ENV_PORT="2022")
[ "Return", "new", "Command", "object", "that", "will", "be", "run", "with", "additional", "environment", "variables", "." ]
python
train
33.6
pantsbuild/pants
src/python/pants/java/nailgun_executor.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/java/nailgun_executor.py#L286-L299
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr): """Post-fork() child callback for ProcessManager.daemon_spawn().""" java = SubprocessExecutor(self._distribution) subproc = java.spawn(classpath=classpath, main='com.martiansoftware.nailgun.NGServer', jvm_options=jvm_options, args=[':0'], stdin=safe_open('/dev/null', 'r'), stdout=safe_open(self._ng_stdout, 'w'), stderr=safe_open(self._ng_stderr, 'w'), close_fds=True) self.write_pid(subproc.pid)
[ "def", "post_fork_child", "(", "self", ",", "fingerprint", ",", "jvm_options", ",", "classpath", ",", "stdout", ",", "stderr", ")", ":", "java", "=", "SubprocessExecutor", "(", "self", ".", "_distribution", ")", "subproc", "=", "java", ".", "spawn", "(", "classpath", "=", "classpath", ",", "main", "=", "'com.martiansoftware.nailgun.NGServer'", ",", "jvm_options", "=", "jvm_options", ",", "args", "=", "[", "':0'", "]", ",", "stdin", "=", "safe_open", "(", "'/dev/null'", ",", "'r'", ")", ",", "stdout", "=", "safe_open", "(", "self", ".", "_ng_stdout", ",", "'w'", ")", ",", "stderr", "=", "safe_open", "(", "self", ".", "_ng_stderr", ",", "'w'", ")", ",", "close_fds", "=", "True", ")", "self", ".", "write_pid", "(", "subproc", ".", "pid", ")" ]
Post-fork() child callback for ProcessManager.daemon_spawn().
[ "Post", "-", "fork", "()", "child", "callback", "for", "ProcessManager", ".", "daemon_spawn", "()", "." ]
python
train
46.928571
wesyoung/pyzyre
czmq/_czmq_ctypes.py
https://github.com/wesyoung/pyzyre/blob/22d4c757acefcfdb700d3802adaf30b402bb9eea/czmq/_czmq_ctypes.py#L3116-L3122
def insert(self, key, item): """ Insert item into hash table with specified key and item. If key is already present returns -1 and leaves existing item unchanged Returns 0 on success. """ return lib.zhashx_insert(self._as_parameter_, key, item)
[ "def", "insert", "(", "self", ",", "key", ",", "item", ")", ":", "return", "lib", ".", "zhashx_insert", "(", "self", ".", "_as_parameter_", ",", "key", ",", "item", ")" ]
Insert item into hash table with specified key and item. If key is already present returns -1 and leaves existing item unchanged Returns 0 on success.
[ "Insert", "item", "into", "hash", "table", "with", "specified", "key", "and", "item", ".", "If", "key", "is", "already", "present", "returns", "-", "1", "and", "leaves", "existing", "item", "unchanged", "Returns", "0", "on", "success", "." ]
python
train
38.571429
conchoecia/gloTK
gloTK/wrappers.py
https://github.com/conchoecia/gloTK/blob/58abee663fcfbbd09f4863c3ca3ae054e33184a8/gloTK/wrappers.py#L118-L127
def check_path(self, path): """ turns path into an absolute path and checks that it exists, then returns it as a string. """ path = os.path.abspath(path) if os.path.exists(path): return path else: utils.die("input file does not exist:\n {}".format(path))
[ "def", "check_path", "(", "self", ",", "path", ")", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "path", "else", ":", "utils", ".", "die", "(", "\"input file does not exists:\\n {}\"", ".", "format", "(", "path", ")", ")" ]
turns path into an absolute path and checks that it exists, then returns it as a string.
[ "turns", "path", "into", "an", "absolute", "path", "and", "checks", "that", "it", "exists", "then", "returns", "it", "as", "a", "string", "." ]
python
train
32.7
cag/sphinxcontrib-soliditydomain
sphinxcontrib/soliditydomain/documenters.py
https://github.com/cag/sphinxcontrib-soliditydomain/blob/b004b6e43727771027b4065fab18fcb9ccb2c826/sphinxcontrib/soliditydomain/documenters.py#L141-L220
def generate(self, more_content=None, all_members=False): # type: (Any, str, bool, bool) -> None """Generate reST for the object given by *self.name*, and possibly for its members. If *more_content* is given, include that content. If *all_members* is True, document all members. """ directive = getattr(self, 'directivetype', self.objtype) # parse components out of name (file, _, namepath) = self.name.rpartition(':') (contract_name, _, fullname) = namepath.partition('.') (name, _, paramtypes) = fullname.partition('(') # normalize components name = name.strip() or None if directive in ('contract', 'interface', 'library') and name is None: name = contract_name contract_name = None paramtypes = ','.join(ptype.strip() for ptype in paramtypes.split(',')) paramtypes = re.sub(r'\s+', ' ', paramtypes) if paramtypes.endswith(')'): paramtypes = paramtypes[:-1] # build query expressions = [ SolidityObject.objtype == directive, SolidityObject.name == name, ] if file: expressions.append(SolidityObject.file == file) if contract_name: expressions.append(SolidityObject.contract_name == contract_name) if paramtypes: expressions.append(SolidityObject.paramtypes == paramtypes) # get associated object query = SolidityObject.select().where(*expressions) sol_objects = tuple(query) if len(sol_objects) == 0: logger.warning('{} {} could not be found via query:\n{}'.format( directive, self.name, ',\n'.join( ' ' + str(expr.lhs.column_name) + str(expr.op) + ('' if expr.rhs is None else expr.rhs) for expr in expressions ))) return elif len(sol_objects) > 1: logger.warning('multiple candidates for {} {} found:\n{}'.format( directive, self.name, '\n'.join(' ' + obj.signature for obj in sol_objects))) self.object = sol_objects[0] # begin rendering output sourcename = self.get_sourcename() # make sure that the result starts with an empty line. This is # necessary for some situations where another directive preprocesses # reST and no starting newline is present self.add_line('', sourcename) # generate the directive header and options, if applicable self.add_directive_header() # make sure content is indented # TODO: consider adding a source unit directive self.indent += self.content_indent # add all content (from docstrings, attribute docs etc.) self.add_content(more_content) # document members, if possible if directive in ('contract', 'interface', 'library'): self.add_line('', sourcename) self.document_members(all_members)
[ "def", "generate", "(", "self", ",", "more_content", "=", "None", ",", "all_members", "=", "False", ")", ":", "# type: (Any, str, bool, bool) -> None", "directive", "=", "getattr", "(", "self", ",", "'directivetype'", ",", "self", ".", "objtype", ")", "# parse components out of name", "(", "file", ",", "_", ",", "namepath", ")", "=", "self", ".", "name", ".", "rpartition", "(", "':'", ")", "(", "contract_name", ",", "_", ",", "fullname", ")", "=", "namepath", ".", "partition", "(", "'.'", ")", "(", "name", ",", "_", ",", "paramtypes", ")", "=", "fullname", ".", "partition", "(", "'('", ")", "# normalize components", "name", "=", "name", ".", "strip", "(", ")", "or", "None", "if", "directive", "in", "(", "'contract'", ",", "'interface'", ",", "'library'", ")", "and", "name", "is", "None", ":", "name", "=", "contract_name", "contract_name", "=", "None", "paramtypes", "=", "','", ".", "join", "(", "ptype", ".", "strip", "(", ")", "for", "ptype", "in", "paramtypes", ".", "split", "(", "','", ")", ")", "paramtypes", "=", "re", ".", "sub", "(", "r'\\s+'", ",", "' '", ",", "paramtypes", ")", "if", "paramtypes", ".", "endswith", "(", "')'", ")", ":", "paramtypes", "=", "paramtypes", "[", ":", "-", "1", "]", "# build query", "expressions", "=", "[", "SolidityObject", ".", "objtype", "==", "directive", ",", "SolidityObject", ".", "name", "==", "name", ",", "]", "if", "file", ":", "expressions", ".", "append", "(", "SolidityObject", ".", "file", "==", "file", ")", "if", "contract_name", ":", "expressions", ".", "append", "(", "SolidityObject", ".", "contract_name", "==", "contract_name", ")", "if", "paramtypes", ":", "expressions", ".", "append", "(", "SolidityObject", ".", "paramtypes", "==", "paramtypes", ")", "# get associated object", "query", "=", "SolidityObject", ".", "select", "(", ")", ".", "where", "(", "*", "expressions", ")", "sol_objects", "=", "tuple", "(", "query", ")", "if", "len", "(", "sol_objects", ")", "==", "0", ":", "logger", ".", "warning", "(", "'{} {} could not be found via query:\\n{}'", ".", "format", "(", "directive", ",", "self", ".", "name", ",", "',\\n'", ".", "join", "(", "' '", "+", "str", "(", "expr", ".", "lhs", ".", "column_name", ")", "+", "str", "(", "expr", ".", "op", ")", "+", "(", "''", "if", "expr", ".", "rhs", "is", "None", "else", "expr", ".", "rhs", ")", "for", "expr", "in", "expressions", ")", ")", ")", "return", "elif", "len", "(", "sol_objects", ")", ">", "1", ":", "logger", ".", "warning", "(", "'multiple candidates for {} {} found:\\n{}'", ".", "format", "(", "directive", ",", "self", ".", "name", ",", "'\\n'", ".", "join", "(", "' '", "+", "obj", ".", "signature", "for", "obj", "in", "sol_objects", ")", ")", ")", "self", ".", "object", "=", "sol_objects", "[", "0", "]", "# begin rendering output", "sourcename", "=", "self", ".", "get_sourcename", "(", ")", "# make sure that the result starts with an empty line. 
This is", "# necessary for some situations where another directive preprocesses", "# reST and no starting newline is present", "self", ".", "add_line", "(", "''", ",", "sourcename", ")", "# generate the directive header and options, if applicable", "self", ".", "add_directive_header", "(", ")", "# make sure content is indented", "# TODO: consider adding a source unit directive", "self", ".", "indent", "+=", "self", ".", "content_indent", "# add all content (from docstrings, attribute docs etc.)", "self", ".", "add_content", "(", "more_content", ")", "# document members, if possible", "if", "directive", "in", "(", "'contract'", ",", "'interface'", ",", "'library'", ")", ":", "self", ".", "add_line", "(", "''", ",", "sourcename", ")", "self", ".", "document_members", "(", "all_members", ")" ]
Generate reST for the object given by *self.name*, and possibly for its members. If *more_content* is given, include that content. If *all_members* is True, document all members.
[ "Generate", "reST", "for", "the", "object", "given", "by", "*", "self", ".", "name", "*", "and", "possibly", "for", "its", "members", "." ]
python
train
37.5125
daethnir/authprogs
setup.py
https://github.com/daethnir/authprogs/blob/0b1e13a609ebeabdb0f10d11fc5dc6e0b20c0343/setup.py#L102-L106
def rm_docs(self): """Remove converted docs.""" for filename in self.created: if os.path.exists(filename): os.unlink(filename)
[ "def", "rm_docs", "(", "self", ")", ":", "for", "filename", "in", "self", ".", "created", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "os", ".", "unlink", "(", "filename", ")" ]
Remove converted docs.
[ "Remove", "converted", "docs", "." ]
python
train
33.2
cbclab/MOT
mot/lib/cl_function.py
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/lib/cl_function.py#L155-L166
def from_string(cls, cl_function, dependencies=()): """Parse the given CL function into a SimpleCLFunction object. Args: cl_function (str): the function we wish to turn into an object dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on Returns: SimpleCLFunction: the CL data type for this parameter declaration """ return_type, function_name, parameter_list, body = split_cl_function(cl_function) return SimpleCLFunction(return_type, function_name, parameter_list, body, dependencies=dependencies)
[ "def", "from_string", "(", "cls", ",", "cl_function", ",", "dependencies", "=", "(", ")", ")", ":", "return_type", ",", "function_name", ",", "parameter_list", ",", "body", "=", "split_cl_function", "(", "cl_function", ")", "return", "SimpleCLFunction", "(", "return_type", ",", "function_name", ",", "parameter_list", ",", "body", ",", "dependencies", "=", "dependencies", ")" ]
Parse the given CL function into a SimpleCLFunction object. Args: cl_function (str): the function we wish to turn into an object dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on Returns: SimpleCLFunction: the CL data type for this parameter declaration
[ "Parse", "the", "given", "CL", "function", "into", "a", "SimpleCLFunction", "object", "." ]
python
train
51.083333
manns/pyspread
pyspread/src/actions/_main_window_actions.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_main_window_actions.py#L507-L553
def copy_result(self, selection): """Returns result If selection consists of one cell only and result is a bitmap then the bitmap is returned. Otherwise the method returns string representations of the result for the given selection in a tab separated string. """ bbox = selection.get_bbox() if not bbox: # There is no selection bb_top, bb_left = self.grid.actions.cursor[:2] bb_bottom, bb_right = bb_top, bb_left else: # There is a selection (bb_top, bb_left), (bb_bottom, bb_right) = bbox if bb_top == bb_bottom and bb_left == bb_right: # We have a single selection tab = self.grid.current_table result = self.grid.code_array[bb_top, bb_left, tab] if isinstance(result, wx._gdi.Bitmap): # The result is a wx.Bitmap. Return it. return result elif Figure is not None and isinstance(result, Figure): # The result is a matplotlib figure # Therefore, a wx.Bitmap is returned key = bb_top, bb_left, tab rect = self.grid.CellToRect(bb_top, bb_left) merged_rect = self.grid.grid_renderer.get_merged_rect( self.grid, key, rect) dpi = float(wx.ScreenDC().GetPPI()[0]) zoom = self.grid.grid_renderer.zoom return fig2bmp(result, merged_rect.width, merged_rect.height, dpi, zoom) # So we have result strings to be returned getter = self._get_result_string return self.copy(selection, getter=getter)
[ "def", "copy_result", "(", "self", ",", "selection", ")", ":", "bbox", "=", "selection", ".", "get_bbox", "(", ")", "if", "not", "bbox", ":", "# There is no selection", "bb_top", ",", "bb_left", "=", "self", ".", "grid", ".", "actions", ".", "cursor", "[", ":", "2", "]", "bb_bottom", ",", "bb_right", "=", "bb_top", ",", "bb_left", "else", ":", "# Thereis a selection", "(", "bb_top", ",", "bb_left", ")", ",", "(", "bb_bottom", ",", "bb_right", ")", "=", "bbox", "if", "bb_top", "==", "bb_bottom", "and", "bb_left", "==", "bb_right", ":", "# We have a single selection", "tab", "=", "self", ".", "grid", ".", "current_table", "result", "=", "self", ".", "grid", ".", "code_array", "[", "bb_top", ",", "bb_left", ",", "tab", "]", "if", "isinstance", "(", "result", ",", "wx", ".", "_gdi", ".", "Bitmap", ")", ":", "# The result is a wx.Bitmap. Return it.", "return", "result", "elif", "Figure", "is", "not", "None", "and", "isinstance", "(", "result", ",", "Figure", ")", ":", "# The result is a matplotlib figure", "# Therefore, a wx.Bitmap is returned", "key", "=", "bb_top", ",", "bb_left", ",", "tab", "rect", "=", "self", ".", "grid", ".", "CellToRect", "(", "bb_top", ",", "bb_left", ")", "merged_rect", "=", "self", ".", "grid", ".", "grid_renderer", ".", "get_merged_rect", "(", "self", ".", "grid", ",", "key", ",", "rect", ")", "dpi", "=", "float", "(", "wx", ".", "ScreenDC", "(", ")", ".", "GetPPI", "(", ")", "[", "0", "]", ")", "zoom", "=", "self", ".", "grid", ".", "grid_renderer", ".", "zoom", "return", "fig2bmp", "(", "result", ",", "merged_rect", ".", "width", ",", "merged_rect", ".", "height", ",", "dpi", ",", "zoom", ")", "# So we have result strings to be returned", "getter", "=", "self", ".", "_get_result_string", "return", "self", ".", "copy", "(", "selection", ",", "getter", "=", "getter", ")" ]
Returns result If selection consists of one cell only and result is a bitmap then the bitmap is returned. Otherwise the method returns string representations of the result for the given selection in a tab separated string.
[ "Returns", "result" ]
python
train
35.893617
StackStorm/pybind
pybind/nos/v7_2_0/interface/hundredgigabitethernet/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v7_2_0/interface/hundredgigabitethernet/__init__.py#L1314-L1337
def _set_openflow_interface_cfg(self, v, load=False): """ Setter method for openflow_interface_cfg, mapped from YANG variable /interface/hundredgigabitethernet/openflow_interface_cfg (container) If this variable is read-only (config: false) in the source YANG file, then _set_openflow_interface_cfg is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_openflow_interface_cfg() directly. YANG Description: OpenFlow configuration. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=openflow_interface_cfg.openflow_interface_cfg, is_container='container', presence=False, yang_name="openflow-interface-cfg", rest_name="openflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow configuration', u'callpoint': u'OpenFlowPhyInterfaceCallpoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'sort-priority': u'108', u'alt-name': u'openflow'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """openflow_interface_cfg must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=openflow_interface_cfg.openflow_interface_cfg, is_container='container', presence=False, yang_name="openflow-interface-cfg", rest_name="openflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow configuration', u'callpoint': u'OpenFlowPhyInterfaceCallpoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'sort-priority': u'108', u'alt-name': u'openflow'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""", }) self.__openflow_interface_cfg = t if hasattr(self, '_set'): self._set()
[ "def", "_set_openflow_interface_cfg", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "openflow_interface_cfg", ".", "openflow_interface_cfg", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"openflow-interface-cfg\"", ",", "rest_name", "=", "\"openflow\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'OpenFlow configuration'", ",", "u'callpoint'", ":", "u'OpenFlowPhyInterfaceCallpoint'", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", ",", "u'sort-priority'", ":", "u'108'", ",", "u'alt-name'", ":", "u'openflow'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-openflow'", ",", "defining_module", "=", "'brocade-openflow'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"openflow_interface_cfg must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=openflow_interface_cfg.openflow_interface_cfg, is_container='container', presence=False, yang_name=\"openflow-interface-cfg\", rest_name=\"openflow\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow configuration', u'callpoint': u'OpenFlowPhyInterfaceCallpoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'sort-priority': u'108', u'alt-name': u'openflow'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__openflow_interface_cfg", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for openflow_interface_cfg, mapped from YANG variable /interface/hundredgigabitethernet/openflow_interface_cfg (container) If this variable is read-only (config: false) in the source YANG file, then _set_openflow_interface_cfg is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_openflow_interface_cfg() directly. YANG Description: OpenFlow configuration.
[ "Setter", "method", "for", "openflow_interface_cfg", "mapped", "from", "YANG", "variable", "/", "interface", "/", "hundredgigabitethernet", "/", "openflow_interface_cfg", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_openflow_interface_cfg", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_openflow_interface_cfg", "()", "directly", "." ]
python
train
89.083333
gwpy/gwpy
gwpy/signal/spectral/_registry.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/spectral/_registry.py#L36-L70
def register_method(func, name=None, deprecated=False): """Register a method of calculating an average spectrogram. Parameters ---------- func : `callable` function to execute name : `str`, optional name of the method, defaults to ``func.__name__`` deprecated : `bool`, optional whether this method is deprecated (`True`) or not (`False`) Returns ------- name : `str` the registered name of the function, which may differ pedantically from what was given by the user. """ # warn about deprecated functions if deprecated: func = deprecated_function( func, "the {0!r} PSD method is deprecated, and will be removed " "in a future release, please consider using {1!r} instead".format( name, name.split('-', 1)[1], ), ) if name is None: name = func.__name__ name = _format_name(name) METHODS[name] = func return name
[ "def", "register_method", "(", "func", ",", "name", "=", "None", ",", "deprecated", "=", "False", ")", ":", "# warn about deprecated functions", "if", "deprecated", ":", "func", "=", "deprecated_function", "(", "func", ",", "\"the {0!r} PSD methods is deprecated, and will be removed \"", "\"in a future release, please consider using {1!r} instead\"", ".", "format", "(", "name", ",", "name", ".", "split", "(", "'-'", ",", "1", ")", "[", "1", "]", ",", ")", ",", ")", "if", "name", "is", "None", ":", "name", "=", "func", ".", "__name__", "name", "=", "_format_name", "(", "name", ")", "METHODS", "[", "name", "]", "=", "func", "return", "name" ]
Register a method of calculating an average spectrogram. Parameters ---------- func : `callable` function to execute name : `str`, optional name of the method, defaults to ``func.__name__`` deprecated : `bool`, optional whether this method is deprecated (`True`) or not (`False`) Returns ------- name : `str` the registered name of the function, which may differ pedantically from what was given by the user.
[ "Register", "a", "method", "of", "calculating", "an", "average", "spectrogram", "." ]
python
train
27.857143
bastibe/SoundFile
soundfile.py
https://github.com/bastibe/SoundFile/blob/161e930da9c9ea76579b6ee18a131e10bca8a605/soundfile.py#L1151-L1158
def close(self): """Close the file. Can be called multiple times.""" if not self.closed: # be sure to flush data to disk before closing the file self.flush() err = _snd.sf_close(self._file) self._file = None _error_check(err)
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "closed", ":", "# be sure to flush data to disk before closing the file", "self", ".", "flush", "(", ")", "err", "=", "_snd", ".", "sf_close", "(", "self", ".", "_file", ")", "self", ".", "_file", "=", "None", "_error_check", "(", "err", ")" ]
Close the file. Can be called multiple times.
[ "Close", "the", "file", ".", "Can", "be", "called", "multiple", "times", "." ]
python
train
36.875
vsoch/helpme
helpme/main/base/settings.py
https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/main/base/settings.py#L232-L258
def get_and_update_setting(self, name, default=None, user=True): '''Look for a setting in the environment (first priority) and then the settings file (second). If something is found, the settings file is updated. The order of operations works as follows: 1. The user config file is used as a cache for the variable 2. the environment variable always takes priority to cache, and if found, will update the cache. 3. If the variable is not found and the cache is set, we are good 4. If the variable is not found and the cache isn't set, return default (default is None) So the user of the function can assume a return of None equates to not set anywhere, and take the appropriate action. ''' setting = self._get_setting(name, user=user) if setting is None and default is not None: setting = default # If the setting is found, update the client secrets if setting is not None: updates = {name : setting} self._update_settings(updates) return setting
[ "def", "get_and_update_setting", "(", "self", ",", "name", ",", "default", "=", "None", ",", "user", "=", "True", ")", ":", "setting", "=", "self", ".", "_get_setting", "(", "name", ",", "user", "=", "user", ")", "if", "setting", "is", "None", "and", "default", "is", "not", "None", ":", "setting", "=", "default", "# If the setting is found, update the client secrets", "if", "setting", "is", "not", "None", ":", "updates", "=", "{", "name", ":", "setting", "}", "self", ".", "_update_settings", "(", "updates", ")", "return", "setting" ]
Look for a setting in the environment (first priority) and then the settings file (second). If something is found, the settings file is updated. The order of operations works as follows: 1. The user config file is used as a cache for the variable 2. the environment variable always takes priority to cache, and if found, will update the cache. 3. If the variable is not found and the cache is set, we are good 4. If the variable is not found and the cache isn't set, return default (default is None) So the user of the function can assume a return of None equates to not set anywhere, and take the appropriate action.
[ "Look", "for", "a", "setting", "in", "the", "environment", "(", "first", "priority", ")", "and", "then", "the", "settings", "file", "(", "second", ")", ".", "If", "something", "is", "found", "the", "settings", "file", "is", "updated", ".", "The", "order", "of", "operations", "works", "as", "follows", ":" ]
python
train
39.074074
theislab/scanpy
scanpy/neighbors/__init__.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/neighbors/__init__.py#L579-L661
def compute_neighbors( self, n_neighbors: int = 30, knn: bool = True, n_pcs: Optional[int] = None, use_rep: Optional[str] = None, method: str = 'umap', random_state: Optional[Union[RandomState, int]] = 0, write_knn_indices: bool = False, metric: str = 'euclidean', metric_kwds: Mapping[str, Any] = {} ) -> None: """\ Compute distances and connectivities of neighbors. Parameters ---------- n_neighbors Use this number of nearest neighbors. knn Restrict result to `n_neighbors` nearest neighbors. {n_pcs} {use_rep} Returns ------- Writes sparse graph attributes `.distances` and `.connectivities`. Also writes `.knn_indices` and `.knn_distances` if `write_knn_indices==True`. """ if n_neighbors > self._adata.shape[0]: # very small datasets n_neighbors = 1 + int(0.5*self._adata.shape[0]) logg.warn('n_obs too small: adjusting to `n_neighbors = {}`' .format(n_neighbors)) if method == 'umap' and not knn: raise ValueError('`method = \'umap\' only with `knn = True`.') if method not in {'umap', 'gauss'}: raise ValueError('`method` needs to be \'umap\' or \'gauss\'.') if self._adata.shape[0] >= 10000 and not knn: logg.warn( 'Using high n_obs without `knn=True` takes a lot of memory...') self.n_neighbors = n_neighbors self.knn = knn X = choose_representation(self._adata, use_rep=use_rep, n_pcs=n_pcs) # neighbor search use_dense_distances = (metric == 'euclidean' and X.shape[0] < 8192) or knn == False if use_dense_distances: _distances = pairwise_distances(X, metric=metric, **metric_kwds) knn_indices, knn_distances = get_indices_distances_from_dense_matrix( _distances, n_neighbors) if knn: self._distances = get_sparse_matrix_from_indices_distances_numpy( knn_indices, knn_distances, X.shape[0], n_neighbors) else: self._distances = _distances else: # non-euclidean case and approx nearest neighbors if X.shape[0] < 4096: X = pairwise_distances(X, metric=metric, **metric_kwds) metric = 'precomputed' knn_indices, knn_distances, _ = compute_neighbors_umap( X, n_neighbors, random_state, metric=metric, metric_kwds=metric_kwds) #self._rp_forest = _make_forest_dict(forest) # write indices as attributes if write_knn_indices: self.knn_indices = knn_indices self.knn_distances = knn_distances logg.msg('computed neighbors', t=True, v=4) if not use_dense_distances or method == 'umap': # we need self._distances also for method == 'gauss' if we didn't # use dense distances self._distances, self._connectivities = compute_connectivities_umap( knn_indices, knn_distances, self._adata.shape[0], self.n_neighbors) # overwrite the umap connectivities if method is 'gauss' # self._distances is unaffected by this if method == 'gauss': self._compute_connectivities_diffmap() logg.msg('computed connectivities', t=True, v=4) self._number_connected_components = 1 if issparse(self._connectivities): from scipy.sparse.csgraph import connected_components self._connected_components = connected_components(self._connectivities) self._number_connected_components = self._connected_components[0]
[ "def", "compute_neighbors", "(", "self", ",", "n_neighbors", ":", "int", "=", "30", ",", "knn", ":", "bool", "=", "True", ",", "n_pcs", ":", "Optional", "[", "int", "]", "=", "None", ",", "use_rep", ":", "Optional", "[", "str", "]", "=", "None", ",", "method", ":", "str", "=", "'umap'", ",", "random_state", ":", "Optional", "[", "Union", "[", "RandomState", ",", "int", "]", "]", "=", "0", ",", "write_knn_indices", ":", "bool", "=", "False", ",", "metric", ":", "str", "=", "'euclidean'", ",", "metric_kwds", ":", "Mapping", "[", "str", ",", "Any", "]", "=", "{", "}", ")", "->", "None", ":", "if", "n_neighbors", ">", "self", ".", "_adata", ".", "shape", "[", "0", "]", ":", "# very small datasets", "n_neighbors", "=", "1", "+", "int", "(", "0.5", "*", "self", ".", "_adata", ".", "shape", "[", "0", "]", ")", "logg", ".", "warn", "(", "'n_obs too small: adjusting to `n_neighbors = {}`'", ".", "format", "(", "n_neighbors", ")", ")", "if", "method", "==", "'umap'", "and", "not", "knn", ":", "raise", "ValueError", "(", "'`method = \\'umap\\' only with `knn = True`.'", ")", "if", "method", "not", "in", "{", "'umap'", ",", "'gauss'", "}", ":", "raise", "ValueError", "(", "'`method` needs to be \\'umap\\' or \\'gauss\\'.'", ")", "if", "self", ".", "_adata", ".", "shape", "[", "0", "]", ">=", "10000", "and", "not", "knn", ":", "logg", ".", "warn", "(", "'Using high n_obs without `knn=True` takes a lot of memory...'", ")", "self", ".", "n_neighbors", "=", "n_neighbors", "self", ".", "knn", "=", "knn", "X", "=", "choose_representation", "(", "self", ".", "_adata", ",", "use_rep", "=", "use_rep", ",", "n_pcs", "=", "n_pcs", ")", "# neighbor search", "use_dense_distances", "=", "(", "metric", "==", "'euclidean'", "and", "X", ".", "shape", "[", "0", "]", "<", "8192", ")", "or", "knn", "==", "False", "if", "use_dense_distances", ":", "_distances", "=", "pairwise_distances", "(", "X", ",", "metric", "=", "metric", ",", "*", "*", "metric_kwds", ")", "knn_indices", ",", "knn_distances", "=", "get_indices_distances_from_dense_matrix", "(", "_distances", ",", "n_neighbors", ")", "if", "knn", ":", "self", ".", "_distances", "=", "get_sparse_matrix_from_indices_distances_numpy", "(", "knn_indices", ",", "knn_distances", ",", "X", ".", "shape", "[", "0", "]", ",", "n_neighbors", ")", "else", ":", "self", ".", "_distances", "=", "_distances", "else", ":", "# non-euclidean case and approx nearest neighbors", "if", "X", ".", "shape", "[", "0", "]", "<", "4096", ":", "X", "=", "pairwise_distances", "(", "X", ",", "metric", "=", "metric", ",", "*", "*", "metric_kwds", ")", "metric", "=", "'precomputed'", "knn_indices", ",", "knn_distances", ",", "_", "=", "compute_neighbors_umap", "(", "X", ",", "n_neighbors", ",", "random_state", ",", "metric", "=", "metric", ",", "metric_kwds", "=", "metric_kwds", ")", "#self._rp_forest = _make_forest_dict(forest)", "# write indices as attributes", "if", "write_knn_indices", ":", "self", ".", "knn_indices", "=", "knn_indices", "self", ".", "knn_distances", "=", "knn_distances", "logg", ".", "msg", "(", "'computed neighbors'", ",", "t", "=", "True", ",", "v", "=", "4", ")", "if", "not", "use_dense_distances", "or", "method", "==", "'umap'", ":", "# we need self._distances also for method == 'gauss' if we didn't", "# use dense distances", "self", ".", "_distances", ",", "self", ".", "_connectivities", "=", "compute_connectivities_umap", "(", "knn_indices", ",", "knn_distances", ",", "self", ".", "_adata", ".", "shape", "[", "0", "]", ",", "self", ".", "n_neighbors", ")", "# overwrite 
the umap connectivities if method is 'gauss'", "# self._distances is unaffected by this", "if", "method", "==", "'gauss'", ":", "self", ".", "_compute_connectivities_diffmap", "(", ")", "logg", ".", "msg", "(", "'computed connectivities'", ",", "t", "=", "True", ",", "v", "=", "4", ")", "self", ".", "_number_connected_components", "=", "1", "if", "issparse", "(", "self", ".", "_connectivities", ")", ":", "from", "scipy", ".", "sparse", ".", "csgraph", "import", "connected_components", "self", ".", "_connected_components", "=", "connected_components", "(", "self", ".", "_connectivities", ")", "self", ".", "_number_connected_components", "=", "self", ".", "_connected_components", "[", "0", "]" ]
\ Compute distances and connectivities of neighbors. Parameters ---------- n_neighbors Use this number of nearest neighbors. knn Restrict result to `n_neighbors` nearest neighbors. {n_pcs} {use_rep} Returns ------- Writes sparse graph attributes `.distances` and `.connectivities`. Also writes `.knn_indices` and `.knn_distances` if `write_knn_indices==True`.
[ "\\", "Compute", "distances", "and", "connectivities", "of", "neighbors", "." ]
python
train
44.939759
tanghaibao/goatools
goatools/grouper/grprplt.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/grprplt.py#L125-L144
def plot_grouped_gos(self, fout_img=None, exclude_hdrs=None, **kws_usr): """One Plot containing all user GOs (yellow or green) and header GO IDs (green or purple).""" # kws_plt -> go2color go2bordercolor kws_plt, kws_dag = self._get_kws_plt(self.grprobj.usrgos, **kws_usr) pltgosusr = self.grprobj.usrgos if exclude_hdrs is not None: pltgosusr = pltgosusr.difference(self.grprobj.get_usrgos_g_hdrgos(exclude_hdrs)) if fout_img is None: fout_img = "{GRP_NAME}.png".format(GRP_NAME=self.grprobj.grpname) # Split one plot into potentially three (BP, MF, CC) if png filename contains '{NS}' if '{NS}' in fout_img: go2nt = self.grprobj.gosubdag.get_go2nt(pltgosusr) for namespace in ['BP', 'MF', 'CC']: pltgos_ns = [go for go in pltgosusr if go2nt[go].NS == namespace] if pltgos_ns: png = fout_img.format(NS=namespace) self._plot_grouped_gos(png, pltgos_ns, kws_plt, kws_dag) # Plot all user GO IDs into a single plot, regardless of their namespace else: self._plot_grouped_gos(fout_img, pltgosusr, kws_plt, kws_dag)
[ "def", "plot_grouped_gos", "(", "self", ",", "fout_img", "=", "None", ",", "exclude_hdrs", "=", "None", ",", "*", "*", "kws_usr", ")", ":", "# kws_plt -> go2color go2bordercolor", "kws_plt", ",", "kws_dag", "=", "self", ".", "_get_kws_plt", "(", "self", ".", "grprobj", ".", "usrgos", ",", "*", "*", "kws_usr", ")", "pltgosusr", "=", "self", ".", "grprobj", ".", "usrgos", "if", "exclude_hdrs", "is", "not", "None", ":", "pltgosusr", "=", "pltgosusr", ".", "difference", "(", "self", ".", "grprobj", ".", "get_usrgos_g_hdrgos", "(", "exclude_hdrs", ")", ")", "if", "fout_img", "is", "None", ":", "fout_img", "=", "\"{GRP_NAME}.png\"", ".", "format", "(", "GRP_NAME", "=", "self", ".", "grprobj", ".", "grpname", ")", "# Split one plot into potentially three (BP, MF, CC) if png filename contains '{NS}'", "if", "'{NS}'", "in", "fout_img", ":", "go2nt", "=", "self", ".", "grprobj", ".", "gosubdag", ".", "get_go2nt", "(", "pltgosusr", ")", "for", "namespace", "in", "[", "'BP'", ",", "'MF'", ",", "'CC'", "]", ":", "pltgos_ns", "=", "[", "go", "for", "go", "in", "pltgosusr", "if", "go2nt", "[", "go", "]", ".", "NS", "==", "namespace", "]", "if", "pltgos_ns", ":", "png", "=", "fout_img", ".", "format", "(", "NS", "=", "namespace", ")", "self", ".", "_plot_grouped_gos", "(", "png", ",", "pltgos_ns", ",", "kws_plt", ",", "kws_dag", ")", "# Plot all user GO IDs into a single plot, regardless of their namespace", "else", ":", "self", ".", "_plot_grouped_gos", "(", "fout_img", ",", "pltgosusr", ",", "kws_plt", ",", "kws_dag", ")" ]
One Plot containing all user GOs (yellow or green) and header GO IDs (green or purple).
[ "One", "Plot", "containing", "all", "user", "GOs", "(", "yellow", "or", "green", ")", "and", "header", "GO", "IDs", "(", "green", "or", "purple", ")", "." ]
python
train
60.15
EconForge/dolo
dolo/numeric/discretization/quadrature.py
https://github.com/EconForge/dolo/blob/d91ddf148b009bf79852d9aec70f3a1877e0f79a/dolo/numeric/discretization/quadrature.py#L59-L122
def gauss_hermite_nodes(orders, sigma, mu=None): ''' Computes the weights and nodes for Gauss Hermite quadrature. Parameters ---------- orders : int, list, array The order of integration used in the quadrature routine sigma : array-like If one dimensional, the variance of the normal distribution being approximated. If multidimensional, the variance-covariance matrix of the multivariate normal process being approximated. Returns ------- x : array Quadrature nodes w : array Quadrature weights ''' if isinstance(orders, int): orders = [orders] import numpy if mu is None: mu = numpy.array( [0]*sigma.shape[0] ) herms = [hermgauss(i) for i in orders] points = [ h[0]*numpy.sqrt(2) for h in herms] weights = [ h[1]/numpy.sqrt( numpy.pi) for h in herms] if len(orders) == 1: # Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1. # print(points.shape) x = numpy.array(points[0])*numpy.sqrt(float(sigma)) if sigma.ndim==2: x = x[:,None] w = weights[0] return [x,w] else: x = cartesian( points).T from functools import reduce w = reduce( numpy.kron, weights) zero_columns = numpy.where(sigma.sum(axis=0)==0)[0] for i in zero_columns: sigma[i,i] = 1.0 C = numpy.linalg.cholesky(sigma) x = numpy.dot(C, x) + mu[:,numpy.newaxis] x = numpy.ascontiguousarray(x.T) for i in zero_columns: x[:,i] =0 return [x,w]
[ "def", "gauss_hermite_nodes", "(", "orders", ",", "sigma", ",", "mu", "=", "None", ")", ":", "if", "isinstance", "(", "orders", ",", "int", ")", ":", "orders", "=", "[", "orders", "]", "import", "numpy", "if", "mu", "is", "None", ":", "mu", "=", "numpy", ".", "array", "(", "[", "0", "]", "*", "sigma", ".", "shape", "[", "0", "]", ")", "herms", "=", "[", "hermgauss", "(", "i", ")", "for", "i", "in", "orders", "]", "points", "=", "[", "h", "[", "0", "]", "*", "numpy", ".", "sqrt", "(", "2", ")", "for", "h", "in", "herms", "]", "weights", "=", "[", "h", "[", "1", "]", "/", "numpy", ".", "sqrt", "(", "numpy", ".", "pi", ")", "for", "h", "in", "herms", "]", "if", "len", "(", "orders", ")", "==", "1", ":", "# Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1.", "# print(points.shape)", "x", "=", "numpy", ".", "array", "(", "points", "[", "0", "]", ")", "*", "numpy", ".", "sqrt", "(", "float", "(", "sigma", ")", ")", "if", "sigma", ".", "ndim", "==", "2", ":", "x", "=", "x", "[", ":", ",", "None", "]", "w", "=", "weights", "[", "0", "]", "return", "[", "x", ",", "w", "]", "else", ":", "x", "=", "cartesian", "(", "points", ")", ".", "T", "from", "functools", "import", "reduce", "w", "=", "reduce", "(", "numpy", ".", "kron", ",", "weights", ")", "zero_columns", "=", "numpy", ".", "where", "(", "sigma", ".", "sum", "(", "axis", "=", "0", ")", "==", "0", ")", "[", "0", "]", "for", "i", "in", "zero_columns", ":", "sigma", "[", "i", ",", "i", "]", "=", "1.0", "C", "=", "numpy", ".", "linalg", ".", "cholesky", "(", "sigma", ")", "x", "=", "numpy", ".", "dot", "(", "C", ",", "x", ")", "+", "mu", "[", ":", ",", "numpy", ".", "newaxis", "]", "x", "=", "numpy", ".", "ascontiguousarray", "(", "x", ".", "T", ")", "for", "i", "in", "zero_columns", ":", "x", "[", ":", ",", "i", "]", "=", "0", "return", "[", "x", ",", "w", "]" ]
Computes the weights and nodes for Gauss Hermite quadrature. Parameters ---------- orders : int, list, array The order of integration used in the quadrature routine sigma : array-like If one dimensional, the variance of the normal distribution being approximated. If multidimensional, the variance-covariance matrix of the multivariate normal process being approximated. Returns ------- x : array Quadrature nodes w : array Quadrature weights
[ "Computes", "the", "weights", "and", "nodes", "for", "Gauss", "Hermite", "quadrature", "." ]
python
train
24.625
log2timeline/plaso
plaso/parsers/sqlite_plugins/firefox.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/sqlite_plugins/firefox.py#L467-L490
def _ReverseHostname(self, hostname): """Reverses the hostname and strips the leading dot. The hostname entry is reversed: moc.elgoog.www. Should be: www.google.com Args: hostname (str): reversed hostname. Returns: str: hostname without a leading dot. """ if not hostname: return '' if len(hostname) <= 1: return hostname if hostname[-1] == '.': return hostname[::-1][1:] return hostname[::-1][0:]
[ "def", "_ReverseHostname", "(", "self", ",", "hostname", ")", ":", "if", "not", "hostname", ":", "return", "''", "if", "len", "(", "hostname", ")", "<=", "1", ":", "return", "hostname", "if", "hostname", "[", "-", "1", "]", "==", "'.'", ":", "return", "hostname", "[", ":", ":", "-", "1", "]", "[", "1", ":", "]", "return", "hostname", "[", ":", ":", "-", "1", "]", "[", "0", ":", "]" ]
Reverses the hostname and strips the leading dot. The hostname entry is reversed: moc.elgoog.www. Should be: www.google.com Args: hostname (str): reversed hostname. Returns: str: hostname without a leading dot.
[ "Reverses", "the", "hostname", "and", "strips", "the", "leading", "dot", "." ]
python
train
19.25
assamite/creamas
creamas/mp.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/mp.py#L886-L915
def spawn_container(addr, env_cls=Environment, mgr_cls=EnvManager, set_seed=True, *args, **kwargs): """Spawn a new environment at a given address as a coroutine. Arguments and keyword arguments are passed down to the created environment at initialization time. If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is installed, this function renames the title of the process to start with 'creamas' so that the process is easily identifiable, e.g. with ``ps -x | grep creamas``. """ # Try setting the process name to easily recognize the spawned # environments with 'ps -x' or 'top' try: import setproctitle as spt title = 'creamas: {}({})'.format(env_cls.__class__.__name__, _get_base_url(addr)) spt.setproctitle(title) except: pass if set_seed: _set_random_seeds() # kwargs['codec'] = aiomas.MsgPack task = start(addr, env_cls, mgr_cls, *args, **kwargs) loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) loop.run_until_complete(task)
[ "def", "spawn_container", "(", "addr", ",", "env_cls", "=", "Environment", ",", "mgr_cls", "=", "EnvManager", ",", "set_seed", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Try setting the process name to easily recognize the spawned", "# environments with 'ps -x' or 'top'", "try", ":", "import", "setproctitle", "as", "spt", "title", "=", "'creamas: {}({})'", ".", "format", "(", "env_cls", ".", "__class__", ".", "__name__", ",", "_get_base_url", "(", "addr", ")", ")", "spt", ".", "setproctitle", "(", "title", ")", "except", ":", "pass", "if", "set_seed", ":", "_set_random_seeds", "(", ")", "# kwargs['codec'] = aiomas.MsgPack", "task", "=", "start", "(", "addr", ",", "env_cls", ",", "mgr_cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", "loop", "=", "asyncio", ".", "new_event_loop", "(", ")", "asyncio", ".", "set_event_loop", "(", "loop", ")", "loop", ".", "run_until_complete", "(", "task", ")" ]
Spawn a new environment at a given address as a coroutine. Arguments and keyword arguments are passed down to the created environment at initialization time. If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is installed, this function renames the title of the process to start with 'creamas' so that the process is easily identifiable, e.g. with ``ps -x | grep creamas``.
[ "Spawn", "a", "new", "environment", "in", "a", "given", "address", "as", "a", "coroutine", "." ]
python
train
36.8
KnowledgeLinks/rdfframework
rdfframework/utilities/codetimer.py
https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/utilities/codetimer.py#L16-L24
def log(self, timer_name, node): ''' logs an event in the timer ''' timestamp = time.time() if hasattr(self, timer_name): getattr(self, timer_name).append({ "node":node, "time":timestamp}) else: setattr(self, timer_name, [{"node":node, "time":timestamp}])
[ "def", "log", "(", "self", ",", "timer_name", ",", "node", ")", ":", "timestamp", "=", "time", ".", "time", "(", ")", "if", "hasattr", "(", "self", ",", "timer_name", ")", ":", "getattr", "(", "self", ",", "timer_name", ")", ".", "append", "(", "{", "\"node\"", ":", "node", ",", "\"time\"", ":", "timestamp", "}", ")", "else", ":", "setattr", "(", "self", ",", "timer_name", ",", "[", "{", "\"node\"", ":", "node", ",", "\"time\"", ":", "timestamp", "}", "]", ")" ]
logs an event in the timer
[ "logs", "a", "event", "in", "the", "timer" ]
python
train
37.111111
apache/incubator-heron
heron/statemgrs/src/python/config.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/statemgrs/src/python/config.py#L45-L50
def validate_state_locations(self): """ Names of all state locations must be unique. """ names = map(lambda loc: loc["name"], self.locations) assert len(names) == len(set(names)), "Names of state locations must be unique"
[ "def", "validate_state_locations", "(", "self", ")", ":", "names", "=", "map", "(", "lambda", "loc", ":", "loc", "[", "\"name\"", "]", ",", "self", ".", "locations", ")", "assert", "len", "(", "names", ")", "==", "len", "(", "set", "(", "names", ")", ")", ",", "\"Names of state locations must be unique\"" ]
Names of all state locations must be unique.
[ "Names", "of", "all", "state", "locations", "must", "be", "unique", "." ]
python
valid
39.333333
dustin/twitty-twister
twittytwister/streaming.py
https://github.com/dustin/twitty-twister/blob/8524750ee73adb57bbe14ef0cfd8aa08e1e59fb3/twittytwister/streaming.py#L35-L48
def lineReceived(self, line): """ Called when a line is received. We expect a length in bytes or an empty line for keep-alive. If we got a length, switch to raw mode to receive that amount of bytes. """ if line and line.isdigit(): self._expectedLength = int(line) self._rawBuffer = [] self._rawBufferLength = 0 self.setRawMode() else: self.keepAliveReceived()
[ "def", "lineReceived", "(", "self", ",", "line", ")", ":", "if", "line", "and", "line", ".", "isdigit", "(", ")", ":", "self", ".", "_expectedLength", "=", "int", "(", "line", ")", "self", ".", "_rawBuffer", "=", "[", "]", "self", ".", "_rawBufferLength", "=", "0", "self", ".", "setRawMode", "(", ")", "else", ":", "self", ".", "keepAliveReceived", "(", ")" ]
Called when a line is received. We expect a length in bytes or an empty line for keep-alive. If we got a length, switch to raw mode to receive that amount of bytes.
[ "Called", "when", "a", "line", "is", "received", "." ]
python
train
33.071429
aiortc/aiortc
aiortc/rtcrtpsender.py
https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcrtpsender.py#L140-L162
async def send(self, parameters: RTCRtpSendParameters): """ Attempt to set the parameters controlling the sending of media. :param: parameters: The :class:`RTCRtpParameters` for the sender. """ if not self.__started: self.__cname = parameters.rtcp.cname self.__mid = parameters.muxId # make note of the RTP header extension IDs self.__transport._register_rtp_sender(self, parameters) self.__rtp_header_extensions_map.configure(parameters) # make note of RTX payload type for codec in parameters.codecs: if is_rtx(codec) and codec.parameters['apt'] == parameters.codecs[0].payloadType: self.__rtx_payload_type = codec.payloadType break self.__rtp_task = asyncio.ensure_future(self._run_rtp(parameters.codecs[0])) self.__rtcp_task = asyncio.ensure_future(self._run_rtcp()) self.__started = True
[ "async", "def", "send", "(", "self", ",", "parameters", ":", "RTCRtpSendParameters", ")", ":", "if", "not", "self", ".", "__started", ":", "self", ".", "__cname", "=", "parameters", ".", "rtcp", ".", "cname", "self", ".", "__mid", "=", "parameters", ".", "muxId", "# make note of the RTP header extension IDs", "self", ".", "__transport", ".", "_register_rtp_sender", "(", "self", ",", "parameters", ")", "self", ".", "__rtp_header_extensions_map", ".", "configure", "(", "parameters", ")", "# make note of RTX payload type", "for", "codec", "in", "parameters", ".", "codecs", ":", "if", "is_rtx", "(", "codec", ")", "and", "codec", ".", "parameters", "[", "'apt'", "]", "==", "parameters", ".", "codecs", "[", "0", "]", ".", "payloadType", ":", "self", ".", "__rtx_payload_type", "=", "codec", ".", "payloadType", "break", "self", ".", "__rtp_task", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "_run_rtp", "(", "parameters", ".", "codecs", "[", "0", "]", ")", ")", "self", ".", "__rtcp_task", "=", "asyncio", ".", "ensure_future", "(", "self", ".", "_run_rtcp", "(", ")", ")", "self", ".", "__started", "=", "True" ]
Attempt to set the parameters controlling the sending of media. :param: parameters: The :class:`RTCRtpParameters` for the sender.
[ "Attempt", "to", "set", "the", "parameters", "controlling", "the", "sending", "of", "media", "." ]
python
train
43.043478
hobson/pug-dj
pug/dj/db.py
https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/db.py#L651-L683
def sum_in_date(x='date', y='net_sales', filter_dict=None, model='WikiItem', app=DEFAULT_APP, sort=True, limit=100000):
    """
    Sum the values of a field for each discrete (categorical) value of another field
    and return a tuple of two lists, the field values and the sums.

    FIXME: Tests need models with a date field:

    Examples:
    >> x, y = sum_in_date(y='net_sales', filter_dict={'model__startswith': 'LC60'}, model='Permission', limit=5, sort=1)
    >> len(x) == len(y) == 5
    True
    >> y[1] >= y[0]
    True
    """
    sort = sort_prefix(sort)
    model = get_model(model, app)
    filter_dict = filter_dict or {}

    objects = model.objects.filter(**filter_dict)
    # only the x values are now in the queryset (datetime information)
    objects = objects.values(x)
    objects = objects.annotate(y=djmodels.Sum(y))

    if sort is not None:
        # FIXME: this duplicates the dict of lists sort below
        objects = objects.order_by(sort + 'y')

    objects = objects.all()
    if limit:
        objects = objects[:int(limit)]

    objects = util.sod_transposed(objects)
    if sort is not None:
        objects = sorted_dict_of_lists(objects, field_names=['y', x], reverse=bool(sort=='-'))

    if not x in objects or not 'y' in objects:
        return [], []
    else:
        return objects[x], objects['y']
[ "def", "sum_in_date", "(", "x", "=", "'date'", ",", "y", "=", "'net_sales'", ",", "filter_dict", "=", "None", ",", "model", "=", "'WikiItem'", ",", "app", "=", "DEFAULT_APP", ",", "sort", "=", "True", ",", "limit", "=", "100000", ")", ":", "sort", "=", "sort_prefix", "(", "sort", ")", "model", "=", "get_model", "(", "model", ",", "app", ")", "filter_dict", "=", "filter_dict", "or", "{", "}", "objects", "=", "model", ".", "objects", ".", "filter", "(", "*", "*", "filter_dict", ")", "# only the x values are now in the queryset (datetime information)", "objects", "=", "objects", ".", "values", "(", "x", ")", "objects", "=", "objects", ".", "annotate", "(", "y", "=", "djmodels", ".", "Sum", "(", "y", ")", ")", "if", "sort", "is", "not", "None", ":", "# FIXME: this duplicates the dict of lists sort below", "objects", "=", "objects", ".", "order_by", "(", "sort", "+", "'y'", ")", "objects", "=", "objects", ".", "all", "(", ")", "if", "limit", ":", "objects", "=", "objects", "[", ":", "int", "(", "limit", ")", "]", "objects", "=", "util", ".", "sod_transposed", "(", "objects", ")", "if", "sort", "is", "not", "None", ":", "objects", "=", "sorted_dict_of_lists", "(", "objects", ",", "field_names", "=", "[", "'y'", ",", "x", "]", ",", "reverse", "=", "bool", "(", "sort", "==", "'-'", ")", ")", "if", "not", "x", "in", "objects", "or", "not", "'y'", "in", "objects", ":", "return", "[", "]", ",", "[", "]", "else", ":", "return", "objects", "[", "x", "]", ",", "objects", "[", "'y'", "]" ]
Sum the values of a field for each discrete (categorical) value of another field
and return a tuple of two lists, the field values and the sums.

FIXME: Tests need models with a date field:

Examples:
>> x, y = sum_in_date(y='net_sales', filter_dict={'model__startswith': 'LC60'}, model='Permission', limit=5, sort=1)
>> len(x) == len(y) == 5
True
>> y[1] >= y[0]
True
[ "Count", "the", "number", "of", "records", "for", "each", "discrete", "(", "categorical", ")", "value", "of", "a", "field", "and", "return", "a", "dict", "of", "two", "lists", "the", "field", "values", "and", "the", "counts", "." ]
python
train
39.69697
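A pure-Python equivalent of the values()/annotate(Sum)/order_by pipeline above, run over plain dicts instead of a Django queryset; the sample rows are made up.

from collections import defaultdict

rows = [  # stand-in for model.objects.filter(**filter_dict)
    {"date": "2024-01-01", "net_sales": 10},
    {"date": "2024-01-02", "net_sales": 7},
    {"date": "2024-01-01", "net_sales": 5},
]

totals = defaultdict(int)
for row in rows:                      # .values(x).annotate(y=Sum(y))
    totals[row["date"]] += row["net_sales"]

pairs = sorted(totals.items(), key=lambda kv: kv[1])  # .order_by('y')
x, y = zip(*pairs)
print(list(x), list(y))  # ['2024-01-02', '2024-01-01'] [7, 15]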
gem/oq-engine
openquake/calculators/views.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/views.py#L838-L864
def view_dupl_sources(token, dstore): """ Show the sources with the same ID and the truly duplicated sources """ fields = ['source_id', 'code', 'gidx1', 'gidx2', 'num_ruptures'] dic = group_array(dstore['source_info'].value[fields], 'source_id') sameid = [] dupl = [] for source_id, group in dic.items(): if len(group) > 1: # same ID sources sources = [] for rec in group: geom = dstore['source_geom'][rec['gidx1']:rec['gidx2']] src = Source(source_id, rec['code'], geom, rec['num_ruptures']) sources.append(src) if all_equal(sources): dupl.append(source_id) sameid.append(source_id) if not dupl: return '' msg = str(dupl) + '\n' msg += ('Found %d source(s) with the same ID and %d true duplicate(s)' % (len(sameid), len(dupl))) fakedupl = set(sameid) - set(dupl) if fakedupl: msg += '\nHere is a fake duplicate: %s' % fakedupl.pop() return msg
[ "def", "view_dupl_sources", "(", "token", ",", "dstore", ")", ":", "fields", "=", "[", "'source_id'", ",", "'code'", ",", "'gidx1'", ",", "'gidx2'", ",", "'num_ruptures'", "]", "dic", "=", "group_array", "(", "dstore", "[", "'source_info'", "]", ".", "value", "[", "fields", "]", ",", "'source_id'", ")", "sameid", "=", "[", "]", "dupl", "=", "[", "]", "for", "source_id", ",", "group", "in", "dic", ".", "items", "(", ")", ":", "if", "len", "(", "group", ")", ">", "1", ":", "# same ID sources", "sources", "=", "[", "]", "for", "rec", "in", "group", ":", "geom", "=", "dstore", "[", "'source_geom'", "]", "[", "rec", "[", "'gidx1'", "]", ":", "rec", "[", "'gidx2'", "]", "]", "src", "=", "Source", "(", "source_id", ",", "rec", "[", "'code'", "]", ",", "geom", ",", "rec", "[", "'num_ruptures'", "]", ")", "sources", ".", "append", "(", "src", ")", "if", "all_equal", "(", "sources", ")", ":", "dupl", ".", "append", "(", "source_id", ")", "sameid", ".", "append", "(", "source_id", ")", "if", "not", "dupl", ":", "return", "''", "msg", "=", "str", "(", "dupl", ")", "+", "'\\n'", "msg", "+=", "(", "'Found %d source(s) with the same ID and %d true duplicate(s)'", "%", "(", "len", "(", "sameid", ")", ",", "len", "(", "dupl", ")", ")", ")", "fakedupl", "=", "set", "(", "sameid", ")", "-", "set", "(", "dupl", ")", "if", "fakedupl", ":", "msg", "+=", "'\\nHere is a fake duplicate: %s'", "%", "fakedupl", ".", "pop", "(", ")", "return", "msg" ]
Show the sources with the same ID and the truly duplicated sources
[ "Show", "the", "sources", "with", "the", "same", "ID", "and", "the", "truly", "duplicated", "sources" ]
python
train
37.888889
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py#L181-L187
def _defaults(self, keys=None): """create an empty record""" d = {} keys = self._keys if keys is None else keys for key in keys: d[key] = None return d
[ "def", "_defaults", "(", "self", ",", "keys", "=", "None", ")", ":", "d", "=", "{", "}", "keys", "=", "self", ".", "_keys", "if", "keys", "is", "None", "else", "keys", "for", "key", "in", "keys", ":", "d", "[", "key", "]", "=", "None", "return", "d" ]
create an empty record
[ "create", "an", "empty", "record" ]
python
test
28.142857
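For what it's worth, the helper above matches a stdlib one-liner: dict.fromkeys fills every key with None by default. The key names here are illustrative, not taken from the source.

keys = ["msg_id", "header", "content"]   # illustrative key names
print(dict.fromkeys(keys))  # {'msg_id': None, 'header': None, 'content': None}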
dailymuse/oz
oz/core/actions.py
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/core/actions.py#L36-L45
def config_maker(project_name, path): """Creates a config file based on the project name""" with open(skeleton_path("config.py"), "r") as config_source: config_content = config_source.read() config_content = config_content.replace("__PROJECT_NAME__", project_name) with open(path, "w") as config_dest: config_dest.write(config_content)
[ "def", "config_maker", "(", "project_name", ",", "path", ")", ":", "with", "open", "(", "skeleton_path", "(", "\"config.py\"", ")", ",", "\"r\"", ")", "as", "config_source", ":", "config_content", "=", "config_source", ".", "read", "(", ")", "config_content", "=", "config_content", ".", "replace", "(", "\"__PROJECT_NAME__\"", ",", "project_name", ")", "with", "open", "(", "path", ",", "\"w\"", ")", "as", "config_dest", ":", "config_dest", ".", "write", "(", "config_content", ")" ]
Creates a config file based on the project name
[ "Creates", "a", "config", "file", "based", "on", "the", "project", "name" ]
python
train
36.1
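A hypothetical invocation, assuming the oz package is installed and its config.py skeleton is present; the project name and destination path are made up.

from oz.core.actions import config_maker  # assumes oz is installed

# Reads the skeleton, substitutes __PROJECT_NAME__, writes the result.
config_maker("blogapp", "./config.py")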
vatlab/SoS
src/sos/utils.py
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/utils.py#L1476-L1495
def tail_of_file(filename, n, ansi2html=False):
    """Reads the last n lines from the given file.
    """
    avg_line_length = 74
    to_read = n
    with open(filename) as f:
        while 1:
            try:
                f.seek(-(avg_line_length * to_read), 2)
            except IOError:
                # woops. apparently file is smaller than what we want
                # to step back, go to the beginning instead
                f.seek(0)
            pos = f.tell()
            lines = f.read().splitlines()
            if len(lines) >= to_read or pos == 0:
                if ansi2html:
                    return convertAnsi2html('\n'.join(lines[-to_read:]))
                return '\n'.join(lines[-to_read:]) + '\n'
            avg_line_length *= 1.3
[ "def", "tail_of_file", "(", "filename", ",", "n", ",", "ansi2html", "=", "False", ")", ":", "avg_line_length", "=", "74", "to_read", "=", "n", "with", "open", "(", "filename", ")", "as", "f", ":", "while", "1", ":", "try", ":", "f", ".", "seek", "(", "-", "(", "avg_line_length", "*", "to_read", ")", ",", "2", ")", "except", "IOError", ":", "# woops. apparently file is smaller than what we want", "# to step back, go to the beginning instead", "f", ".", "seek", "(", "0", ")", "pos", "=", "f", ".", "tell", "(", ")", "lines", "=", "f", ".", "read", "(", ")", ".", "splitlines", "(", ")", "if", "len", "(", "lines", ")", ">=", "to_read", "or", "pos", "==", "0", ":", "if", "ansi2html", ":", "return", "convertAnsi2html", "(", "'\\n'", ".", "join", "(", "lines", "[", "-", "to_read", ":", "]", ")", ")", "return", "'\\n'", ".", "join", "(", "lines", "[", "-", "to_read", ":", "]", ")", "+", "'\\n'", "avg_line_length", "*=", "1.3" ]
Reads the last n lines from the given file.
[ "Reads", "a", "n", "lines", "from", "f", "with", "an", "offset", "of", "offset", "lines", "." ]
python
train
37.8
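A hedged usage sketch, assuming the sos package is installed so the function is importable; the log contents are made up.

import tempfile

from sos.utils import tail_of_file  # assumes sos is installed

with tempfile.NamedTemporaryFile("w", suffix=".log", delete=False) as f:
    f.write("\n".join("line %d" % i for i in range(1000)))

print(tail_of_file(f.name, 3))  # last three lines, newline-terminated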
saltstack/salt
salt/states/azurearm_dns.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_dns.py#L300-L360
def zone_absent(name, resource_group, connection_auth=None): ''' .. versionadded:: Fluorine Ensure a DNS zone does not exist in the resource group. :param name: Name of the DNS zone. :param resource_group: The resource group assigned to the DNS zone. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API. ''' ret = { 'name': name, 'result': False, 'comment': '', 'changes': {} } if not isinstance(connection_auth, dict): ret['comment'] = 'Connection information must be specified via connection_auth dictionary!' return ret zone = __salt__['azurearm_dns.zone_get']( name, resource_group, azurearm_log_level='info', **connection_auth ) if 'error' in zone: ret['result'] = True ret['comment'] = 'DNS zone {0} was not found.'.format(name) return ret elif __opts__['test']: ret['comment'] = 'DNS zone {0} would be deleted.'.format(name) ret['result'] = None ret['changes'] = { 'old': zone, 'new': {}, } return ret deleted = __salt__['azurearm_dns.zone_delete'](name, resource_group, **connection_auth) if deleted: ret['result'] = True ret['comment'] = 'DNS zone {0} has been deleted.'.format(name) ret['changes'] = { 'old': zone, 'new': {} } return ret ret['comment'] = 'Failed to delete DNS zone {0}!'.format(name) return ret
[ "def", "zone_absent", "(", "name", ",", "resource_group", ",", "connection_auth", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "if", "not", "isinstance", "(", "connection_auth", ",", "dict", ")", ":", "ret", "[", "'comment'", "]", "=", "'Connection information must be specified via connection_auth dictionary!'", "return", "ret", "zone", "=", "__salt__", "[", "'azurearm_dns.zone_get'", "]", "(", "name", ",", "resource_group", ",", "azurearm_log_level", "=", "'info'", ",", "*", "*", "connection_auth", ")", "if", "'error'", "in", "zone", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'DNS zone {0} was not found.'", ".", "format", "(", "name", ")", "return", "ret", "elif", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'DNS zone {0} would be deleted.'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "zone", ",", "'new'", ":", "{", "}", ",", "}", "return", "ret", "deleted", "=", "__salt__", "[", "'azurearm_dns.zone_delete'", "]", "(", "name", ",", "resource_group", ",", "*", "*", "connection_auth", ")", "if", "deleted", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'DNS zone {0} has been deleted.'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "zone", ",", "'new'", ":", "{", "}", "}", "return", "ret", "ret", "[", "'comment'", "]", "=", "'Failed to delete DNS zone {0}!'", ".", "format", "(", "name", ")", "return", "ret" ]
.. versionadded:: Fluorine Ensure a DNS zone does not exist in the resource group. :param name: Name of the DNS zone. :param resource_group: The resource group assigned to the DNS zone. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API.
[ "..", "versionadded", "::", "Fluorine" ]
python
train
26.131148
minio/minio-py
minio/xml_marshal.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/xml_marshal.py#L165-L199
def _add_notification_config_to_xml(node, element_name, configs): """ Internal function that builds the XML sub-structure for a given kind of notification configuration. """ for config in configs: config_node = s3_xml.SubElement(node, element_name) if 'Id' in config: id_node = s3_xml.SubElement(config_node, 'Id') id_node.text = config['Id'] arn_node = s3_xml.SubElement( config_node, NOTIFICATIONS_ARN_FIELDNAME_MAP[element_name] ) arn_node.text = config['Arn'] for event in config['Events']: event_node = s3_xml.SubElement(config_node, 'Event') event_node.text = event filter_rules = config.get('Filter', {}).get( 'Key', {}).get('FilterRules', []) if filter_rules: filter_node = s3_xml.SubElement(config_node, 'Filter') s3key_node = s3_xml.SubElement(filter_node, 'S3Key') for filter_rule in filter_rules: filter_rule_node = s3_xml.SubElement(s3key_node, 'FilterRule') name_node = s3_xml.SubElement(filter_rule_node, 'Name') name_node.text = filter_rule['Name'] value_node = s3_xml.SubElement(filter_rule_node, 'Value') value_node.text = filter_rule['Value'] return node
[ "def", "_add_notification_config_to_xml", "(", "node", ",", "element_name", ",", "configs", ")", ":", "for", "config", "in", "configs", ":", "config_node", "=", "s3_xml", ".", "SubElement", "(", "node", ",", "element_name", ")", "if", "'Id'", "in", "config", ":", "id_node", "=", "s3_xml", ".", "SubElement", "(", "config_node", ",", "'Id'", ")", "id_node", ".", "text", "=", "config", "[", "'Id'", "]", "arn_node", "=", "s3_xml", ".", "SubElement", "(", "config_node", ",", "NOTIFICATIONS_ARN_FIELDNAME_MAP", "[", "element_name", "]", ")", "arn_node", ".", "text", "=", "config", "[", "'Arn'", "]", "for", "event", "in", "config", "[", "'Events'", "]", ":", "event_node", "=", "s3_xml", ".", "SubElement", "(", "config_node", ",", "'Event'", ")", "event_node", ".", "text", "=", "event", "filter_rules", "=", "config", ".", "get", "(", "'Filter'", ",", "{", "}", ")", ".", "get", "(", "'Key'", ",", "{", "}", ")", ".", "get", "(", "'FilterRules'", ",", "[", "]", ")", "if", "filter_rules", ":", "filter_node", "=", "s3_xml", ".", "SubElement", "(", "config_node", ",", "'Filter'", ")", "s3key_node", "=", "s3_xml", ".", "SubElement", "(", "filter_node", ",", "'S3Key'", ")", "for", "filter_rule", "in", "filter_rules", ":", "filter_rule_node", "=", "s3_xml", ".", "SubElement", "(", "s3key_node", ",", "'FilterRule'", ")", "name_node", "=", "s3_xml", ".", "SubElement", "(", "filter_rule_node", ",", "'Name'", ")", "name_node", ".", "text", "=", "filter_rule", "[", "'Name'", "]", "value_node", "=", "s3_xml", ".", "SubElement", "(", "filter_rule_node", ",", "'Value'", ")", "value_node", ".", "text", "=", "filter_rule", "[", "'Value'", "]", "return", "node" ]
Internal function that builds the XML sub-structure for a given kind of notification configuration.
[ "Internal", "function", "that", "builds", "the", "XML", "sub", "-", "structure", "for", "a", "given", "kind", "of", "notification", "configuration", "." ]
python
train
38.057143
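A standalone sketch of the XML shape this builds, using the stdlib ElementTree directly (s3_xml above is presumably an ElementTree alias); the ARN and event name are made up.

import xml.etree.ElementTree as ET

root = ET.Element("NotificationConfiguration")
cfg = ET.SubElement(root, "QueueConfiguration")
ET.SubElement(cfg, "Queue").text = "arn:aws:sqs:us-east-1:123456789012:my-queue"
ET.SubElement(cfg, "Event").text = "s3:ObjectCreated:*"

print(ET.tostring(root).decode())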
RudolfCardinal/pythonlib
cardinal_pythonlib/sqlalchemy/schema.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/sqlalchemy/schema.py#L595-L611
def giant_text_sqltype(dialect: Dialect) -> str: """ Returns the SQL column type used to make very large text columns for a given dialect. Args: dialect: a SQLAlchemy :class:`Dialect` Returns: the SQL data type of "giant text", typically 'LONGTEXT' for MySQL and 'NVARCHAR(MAX)' for SQL Server. """ if dialect.name == SqlaDialectName.SQLSERVER: return 'NVARCHAR(MAX)' elif dialect.name == SqlaDialectName.MYSQL: return 'LONGTEXT' else: raise ValueError("Unknown dialect: {}".format(dialect.name))
[ "def", "giant_text_sqltype", "(", "dialect", ":", "Dialect", ")", "->", "str", ":", "if", "dialect", ".", "name", "==", "SqlaDialectName", ".", "SQLSERVER", ":", "return", "'NVARCHAR(MAX)'", "elif", "dialect", ".", "name", "==", "SqlaDialectName", ".", "MYSQL", ":", "return", "'LONGTEXT'", "else", ":", "raise", "ValueError", "(", "\"Unknown dialect: {}\"", ".", "format", "(", "dialect", ".", "name", ")", ")" ]
Returns the SQL column type used to make very large text columns for a given dialect. Args: dialect: a SQLAlchemy :class:`Dialect` Returns: the SQL data type of "giant text", typically 'LONGTEXT' for MySQL and 'NVARCHAR(MAX)' for SQL Server.
[ "Returns", "the", "SQL", "column", "type", "used", "to", "make", "very", "large", "text", "columns", "for", "a", "given", "dialect", "." ]
python
train
33.235294
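A hedged usage sketch, assuming cardinal_pythonlib and SQLAlchemy are installed; SQLAlchemy's stock dialect modules each expose a dialect class whose instances carry the .name this function switches on.

from sqlalchemy.dialects import mssql, mysql

from cardinal_pythonlib.sqlalchemy.schema import giant_text_sqltype

print(giant_text_sqltype(mssql.dialect()))  # NVARCHAR(MAX)
print(giant_text_sqltype(mysql.dialect()))  # LONGTEXT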
SeleniumHQ/selenium
py/selenium/webdriver/remote/switch_to.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/remote/switch_to.py#L125-L141
def window(self, window_name): """ Switches focus to the specified window. :Args: - window_name: The name or window handle of the window to switch to. :Usage: :: driver.switch_to.window('main') """ if self._driver.w3c: self._w3c_window(window_name) return data = {'name': window_name} self._driver.execute(Command.SWITCH_TO_WINDOW, data)
[ "def", "window", "(", "self", ",", "window_name", ")", ":", "if", "self", ".", "_driver", ".", "w3c", ":", "self", ".", "_w3c_window", "(", "window_name", ")", "return", "data", "=", "{", "'name'", ":", "window_name", "}", "self", ".", "_driver", ".", "execute", "(", "Command", ".", "SWITCH_TO_WINDOW", ",", "data", ")" ]
Switches focus to the specified window. :Args: - window_name: The name or window handle of the window to switch to. :Usage: :: driver.switch_to.window('main')
[ "Switches", "focus", "to", "the", "specified", "window", "." ]
python
train
26.411765
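A typical usage sketch against a live session; Firefox and the URLs are illustrative. Under the W3C protocol, window handles are the reliable argument (names only work when a window was explicitly given one).

from selenium import webdriver

driver = webdriver.Firefox()
driver.get("https://example.com")
original = driver.current_window_handle
driver.execute_script("window.open('https://example.org');")
for handle in driver.window_handles:
    if handle != original:
        driver.switch_to.window(handle)  # switch by handle
        break
print(driver.title)
driver.switch_to.window(original)  # back to the first window
driver.quit()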
google/grr
grr/server/grr_response_server/client_report_utils.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/client_report_utils.py#L196-L240
def _FetchMostRecentGraphSeriesFromTheLegacyDB( label, report_type, token = None ): """Fetches the latest graph-series for a client label from the legacy DB. Args: label: Client label to fetch data for. report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for. token: ACL token to use for reading from the DB. Raises: AFF4AttributeTypeError: If an unexpected report-data type is encountered. Returns: The graph series for the given label and report type that was last written to the DB, or None if no series for that label and report-type exist. """ try: stats_for_label = aff4.FACTORY.Open( GetAFF4ClientReportsURN().Add(label), aff4_type=aff4_stats.ClientFleetStats, mode="r", token=token) except aff4.InstantiationError: # Nothing to return for the given label and report-type. return None aff4_attr = _GetAFF4AttributeForReportType(report_type) graph_series = rdf_stats.ClientGraphSeries(report_type=report_type) if aff4_attr.attribute_type == rdf_stats.GraphSeries: graphs = stats_for_label.Get(aff4_attr) if graphs is None: return None for graph in graphs: graph_series.graphs.Append(graph) elif aff4_attr.attribute_type == rdf_stats.Graph: graph = stats_for_label.Get(aff4_attr) if graph is None: return None graph_series.graphs.Append(graph) else: raise AFF4AttributeTypeError(aff4_attr.attribute_type) return graph_series
[ "def", "_FetchMostRecentGraphSeriesFromTheLegacyDB", "(", "label", ",", "report_type", ",", "token", "=", "None", ")", ":", "try", ":", "stats_for_label", "=", "aff4", ".", "FACTORY", ".", "Open", "(", "GetAFF4ClientReportsURN", "(", ")", ".", "Add", "(", "label", ")", ",", "aff4_type", "=", "aff4_stats", ".", "ClientFleetStats", ",", "mode", "=", "\"r\"", ",", "token", "=", "token", ")", "except", "aff4", ".", "InstantiationError", ":", "# Nothing to return for the given label and report-type.", "return", "None", "aff4_attr", "=", "_GetAFF4AttributeForReportType", "(", "report_type", ")", "graph_series", "=", "rdf_stats", ".", "ClientGraphSeries", "(", "report_type", "=", "report_type", ")", "if", "aff4_attr", ".", "attribute_type", "==", "rdf_stats", ".", "GraphSeries", ":", "graphs", "=", "stats_for_label", ".", "Get", "(", "aff4_attr", ")", "if", "graphs", "is", "None", ":", "return", "None", "for", "graph", "in", "graphs", ":", "graph_series", ".", "graphs", ".", "Append", "(", "graph", ")", "elif", "aff4_attr", ".", "attribute_type", "==", "rdf_stats", ".", "Graph", ":", "graph", "=", "stats_for_label", ".", "Get", "(", "aff4_attr", ")", "if", "graph", "is", "None", ":", "return", "None", "graph_series", ".", "graphs", ".", "Append", "(", "graph", ")", "else", ":", "raise", "AFF4AttributeTypeError", "(", "aff4_attr", ".", "attribute_type", ")", "return", "graph_series" ]
Fetches the latest graph-series for a client label from the legacy DB. Args: label: Client label to fetch data for. report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for. token: ACL token to use for reading from the DB. Raises: AFF4AttributeTypeError: If an unexpected report-data type is encountered. Returns: The graph series for the given label and report type that was last written to the DB, or None if no series for that label and report-type exist.
[ "Fetches", "the", "latest", "graph", "-", "series", "for", "a", "client", "label", "from", "the", "legacy", "DB", "." ]
python
train
32.4
markbaas/python-iresolve
iresolve.py
https://github.com/markbaas/python-iresolve/blob/ba91e37221e91265e4ac5dbc6e8f5cffa955a04f/iresolve.py#L37-L46
def suppress_output(reverse=False):
    """ Suppress output """
    if reverse:
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
    else:
        devnull = open(os.devnull, 'w')
        sys.stdout = devnull
        sys.stderr = devnull
[ "def", "suppress_output", "(", "reverse", "=", "False", ")", ":", "if", "reverse", ":", "sys", ".", "stdout", "=", "sys", ".", "__stdout__", "sys", ".", "stderr", "=", "sys", ".", "__stderr__", "else", ":", "sys", ".", "stdout", "=", "os", ".", "devnull", "sys", ".", "stderr", "=", "os", ".", "devnull" ]
Suppress output
[ "Suppress", "output" ]
python
train
22.4
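On Python 3.5+, the stdlib also offers a context-manager alternative that restores the streams automatically instead of touching sys.__stdout__ by hand:

import contextlib
import os

with open(os.devnull, "w") as devnull, \
        contextlib.redirect_stdout(devnull), \
        contextlib.redirect_stderr(devnull):
    print("this goes nowhere")
print("output restored")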
saltstack/salt
salt/modules/chocolatey.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/chocolatey.py#L614-L647
def install_python(name, version=None, install_args=None, override_args=False):
    '''
    Instructs Chocolatey to install a package via Python's easy_install.

    name
        The name of the package to be installed. Only accepts a single argument.

    version
        Install a specific version of the package. Defaults to latest version
        available.

    install_args
        A list of install arguments you want to pass to the installation process,
        i.e. product key or feature list

    override_args
        Set to true if you want to override the original install arguments (for
        the native installer) in the package and use your own. When this is set to
        False install_args will be appended to the end of the default arguments

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.install_python <package name>
        salt '*' chocolatey.install_python <package name> version=<package version>
        salt '*' chocolatey.install_python <package name> install_args=<args> override_args=True
    '''
    return install(name,
                   version=version,
                   source='python',
                   install_args=install_args,
                   override_args=override_args)
[ "def", "install_python", "(", "name", ",", "version", "=", "None", ",", "install_args", "=", "None", ",", "override_args", "=", "False", ")", ":", "return", "install", "(", "name", ",", "version", "=", "version", ",", "source", "=", "'python'", ",", "install_args", "=", "install_args", ",", "override_args", "=", "override_args", ")" ]
Instructs Chocolatey to install a package via Python's easy_install.

name
    The name of the package to be installed. Only accepts a single argument.

version
    Install a specific version of the package. Defaults to latest version
    available.

install_args
    A list of install arguments you want to pass to the installation process,
    i.e. product key or feature list

override_args
    Set to true if you want to override the original install arguments (for
    the native installer) in the package and use your own. When this is set to
    False install_args will be appended to the end of the default arguments

CLI Example:

.. code-block:: bash

    salt '*' chocolatey.install_python <package name>
    salt '*' chocolatey.install_python <package name> version=<package version>
    salt '*' chocolatey.install_python <package name> install_args=<args> override_args=True
[ "Instructs", "Chocolatey", "to", "install", "a", "package", "via", "Python", "s", "easy_install", "." ]
python
train
35.882353