code (string, 75–104k chars) | docstring (string, 1–46.9k chars) | text (string, 164–112k chars) |
---|---|---|
def save_model(self, request, obj, form, change):
    """
    Sends a tweet with the title/short_url if applicable.
    """
    super(TweetableAdminMixin, self).save_model(request, obj, form, change)
    if Api and request.POST.get("send_tweet", False):
        auth_settings = get_auth_settings()
        obj.set_short_url()
        message = truncatechars(obj, 140 - len(obj.short_url) - 1)
        api = Api(*auth_settings)
        api.PostUpdate("%s %s" % (message, obj.short_url)) | Sends a tweet with the title/short_url if applicable. | Below is the instruction that describes the task:
### Input:
Sends a tweet with the title/short_url if applicable.
### Response:
def save_model(self, request, obj, form, change):
    """
    Sends a tweet with the title/short_url if applicable.
    """
    super(TweetableAdminMixin, self).save_model(request, obj, form, change)
    if Api and request.POST.get("send_tweet", False):
        auth_settings = get_auth_settings()
        obj.set_short_url()
        message = truncatechars(obj, 140 - len(obj.short_url) - 1)
        api = Api(*auth_settings)
        api.PostUpdate("%s %s" % (message, obj.short_url)) |
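The length arithmetic above reserves one character for the space between the truncated title and the short URL, so the whole tweet stays within 140 characters. A minimal stand-alone sketch of that budget, using a plain-Python stand-in for Django's `truncatechars` filter (the URL and title values are illustrative):

```python
def truncate_chars(text, limit, ellipsis="..."):
    """Plain-Python stand-in for Django's truncatechars filter."""
    if len(text) <= limit:
        return text
    return text[: max(limit - len(ellipsis), 0)] + ellipsis

short_url = "http://example.com/s/abc123"   # illustrative short URL (27 chars)
title = "An unreasonably long post title that would overflow a tweet " * 3
message = truncate_chars(title, 140 - len(short_url) - 1)
tweet = "%s %s" % (message, short_url)
assert len(tweet) <= 140
```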
def add_context_action(self, action):
    """
    Adds a custom context menu action
    :param action: action to add.
    """
    self.main_tab_widget.context_actions.append(action)
    for child_splitter in self.child_splitters:
        child_splitter.add_context_action(action) | Adds a custom context menu action
:param action: action to add. | Below is the instruction that describes the task:
### Input:
Adds a custom context menu action
:param action: action to add.
### Response:
def add_context_action(self, action):
    """
    Adds a custom context menu action
    :param action: action to add.
    """
    self.main_tab_widget.context_actions.append(action)
    for child_splitter in self.child_splitters:
        child_splitter.add_context_action(action) |
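The method above fans the action out recursively, so every nested splitter's tab widget ends up with the same context-menu entry. A simplified, self-contained sketch of that propagation pattern (the classes below are stand-ins, not the real widget classes):

```python
class FakeTabWidget:
    def __init__(self):
        self.context_actions = []

class FakeSplitter:
    """Stand-in mimicking the splitter/tab-widget relationship above."""
    def __init__(self):
        self.main_tab_widget = FakeTabWidget()
        self.child_splitters = []

    def add_context_action(self, action):
        self.main_tab_widget.context_actions.append(action)
        for child_splitter in self.child_splitters:
            child_splitter.add_context_action(action)

root = FakeSplitter()
root.child_splitters.append(FakeSplitter())
root.add_context_action("Open in new window")
assert all("Open in new window" in s.main_tab_widget.context_actions
           for s in [root] + root.child_splitters)
```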
def capture_update_from_model(cls, table_name, record_id, *, update_fields=()):
    """
    Create a fresh update record from the current model state in the database.
    For read-write connected models, this will lead to the attempted update of the values of
    a corresponding object in Salesforce.
    Args:
        table_name (str): The name of the table backing the connected model (without schema)
        record_id (int): The primary id of the connected model
        update_fields (Iterable[str]): If given, the names of fields that will be included in
            the write record
    Returns:
        A list of the created TriggerLog entries (usually one).
    Raises:
        LookupError: if ``table_name`` does not belong to a connected model
    """
    include_cols = ()
    if update_fields:
        model_cls = get_connected_model_for_table_name(table_name)
        include_cols = cls._fieldnames_to_colnames(model_cls, update_fields)
    raw_query = sql.SQL("""
        SELECT {schema}.hc_capture_update_from_row(
            hstore({schema}.{table_name}.*),
            %(table_name)s,
            ARRAY[{include_cols}]::text[] -- cast to type expected by stored procedure
        ) AS id
        FROM {schema}.{table_name}
        WHERE id = %(record_id)s
    """).format(
        schema=sql.Identifier(settings.HEROKU_CONNECT_SCHEMA),
        table_name=sql.Identifier(table_name),
        include_cols=sql.SQL(', ').join(sql.Identifier(col) for col in include_cols),
    )
    params = {'record_id': record_id, 'table_name': table_name}
    result_qs = TriggerLog.objects.raw(raw_query, params)
    return list(result_qs) | Create a fresh update record from the current model state in the database.
For read-write connected models, this will lead to the attempted update of the values of
a corresponding object in Salesforce.
Args:
table_name (str): The name of the table backing the connected model (without schema)
record_id (int): The primary id of the connected model
update_fields (Iterable[str]): If given, the names of fields that will be included in
the write record
Returns:
A list of the created TriggerLog entries (usually one).
Raises:
LookupError: if ``table_name`` does not belong to a connected model | Below is the instruction that describes the task:
### Input:
Create a fresh update record from the current model state in the database.
For read-write connected models, this will lead to the attempted update of the values of
a corresponding object in Salesforce.
Args:
table_name (str): The name of the table backing the connected model (without schema)
record_id (int): The primary id of the connected model
update_fields (Iterable[str]): If given, the names of fields that will be included in
the write record
Returns:
A list of the created TriggerLog entries (usually one).
Raises:
LookupError: if ``table_name`` does not belong to a connected model
### Response:
def capture_update_from_model(cls, table_name, record_id, *, update_fields=()):
    """
    Create a fresh update record from the current model state in the database.
    For read-write connected models, this will lead to the attempted update of the values of
    a corresponding object in Salesforce.
    Args:
        table_name (str): The name of the table backing the connected model (without schema)
        record_id (int): The primary id of the connected model
        update_fields (Iterable[str]): If given, the names of fields that will be included in
            the write record
    Returns:
        A list of the created TriggerLog entries (usually one).
    Raises:
        LookupError: if ``table_name`` does not belong to a connected model
    """
    include_cols = ()
    if update_fields:
        model_cls = get_connected_model_for_table_name(table_name)
        include_cols = cls._fieldnames_to_colnames(model_cls, update_fields)
    raw_query = sql.SQL("""
        SELECT {schema}.hc_capture_update_from_row(
            hstore({schema}.{table_name}.*),
            %(table_name)s,
            ARRAY[{include_cols}]::text[] -- cast to type expected by stored procedure
        ) AS id
        FROM {schema}.{table_name}
        WHERE id = %(record_id)s
    """).format(
        schema=sql.Identifier(settings.HEROKU_CONNECT_SCHEMA),
        table_name=sql.Identifier(table_name),
        include_cols=sql.SQL(', ').join(sql.Identifier(col) for col in include_cols),
    )
    params = {'record_id': record_id, 'table_name': table_name}
    result_qs = TriggerLog.objects.raw(raw_query, params)
    return list(result_qs) |
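A hypothetical call site for the classmethod above. The table name, record id, field names and import path are illustrative assumptions; they require a configured Django project with the Heroku Connect integration installed:

```python
# Hypothetical usage; table/field names and the import path are assumptions.
from heroku_connect.models import TriggerLog  # assumed import path

entries = TriggerLog.capture_update_from_model(
    "contact",                              # table backing the connected model, no schema prefix
    42,                                     # primary id of the row to capture
    update_fields=("first_name", "email"),  # restrict the write record to these fields
)
print([entry.id for entry in entries])      # usually a single TriggerLog entry
```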
def defvalkey(js, key, default=None, take_none=True):
    """
    Returns js[key] if set, otherwise default. Note js[key] can be None.
    :param js:
    :param key:
    :param default:
    :param take_none:
    :return:
    """
    if js is None:
        return default
    if key not in js:
        return default
    if js[key] is None and not take_none:
        return default
    return js[key] | Returns js[key] if set, otherwise default. Note js[key] can be None.
:param js:
:param key:
:param default:
:param take_none:
:return: | Below is the instruction that describes the task:
### Input:
Returns js[key] if set, otherwise default. Note js[key] can be None.
:param js:
:param key:
:param default:
:param take_none:
:return:
### Response:
def defvalkey(js, key, default=None, take_none=True):
    """
    Returns js[key] if set, otherwise default. Note js[key] can be None.
    :param js:
    :param key:
    :param default:
    :param take_none:
    :return:
    """
    if js is None:
        return default
    if key not in js:
        return default
    if js[key] is None and not take_none:
        return default
    return js[key] |
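The four branches above, exercised on a small dict (values chosen only for illustration):

```python
js = {"name": "alice", "email": None}

assert defvalkey(js, "name") == "alice"                                  # key present
assert defvalkey(js, "phone", default="n/a") == "n/a"                    # key missing
assert defvalkey(js, "email") is None                                    # None kept (take_none=True)
assert defvalkey(js, "email", default="n/a", take_none=False) == "n/a"   # None rejected
assert defvalkey(None, "name", default="n/a") == "n/a"                   # no dict at all
```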
def xsl_elements(self):
    """Find all "XSL" styled runs, normalize related paragraph and returns list of XslElements"""
    def append_xsl_elements(xsl_elements, r, xsl):
        if r is not None:
            r.xpath('.//w:t', namespaces=self.namespaces)[0].text = xsl
            xe = XslElement(r, logger=self.logger)
            xsl_elements.append(xe)
        return None, ''
    if not getattr(self, '_xsl_elements', None):
        xsl_elements = []
        for p in self.root.xpath('.//w:p', namespaces=self.namespaces):
            xsl_r, xsl = None, ''
            for r in p:
                # find first XSL run and add all XSL meta text
                text = ''.join(t.text for t in r.xpath('.//w:t', namespaces=self.namespaces))
                if r.xpath('.//w:rPr/w:rStyle[@w:val="%s"]' % self.style, namespaces=self.namespaces):
                    xsl += text
                    if xsl_r is None and text:
                        xsl_r = r
                    else:
                        r.getparent().remove(r)
                elif text:
                    xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl)
            xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl)
        self._xsl_elements = xsl_elements
    return self._xsl_elements | Find all "XSL" styled runs, normalize related paragraph and returns list of XslElements | Below is the instruction that describes the task:
### Input:
Find all "XSL" styled runs, normalize related paragraph and returns list of XslElements
### Response:
def xsl_elements(self):
    """Find all "XSL" styled runs, normalize related paragraph and returns list of XslElements"""
    def append_xsl_elements(xsl_elements, r, xsl):
        if r is not None:
            r.xpath('.//w:t', namespaces=self.namespaces)[0].text = xsl
            xe = XslElement(r, logger=self.logger)
            xsl_elements.append(xe)
        return None, ''
    if not getattr(self, '_xsl_elements', None):
        xsl_elements = []
        for p in self.root.xpath('.//w:p', namespaces=self.namespaces):
            xsl_r, xsl = None, ''
            for r in p:
                # find first XSL run and add all XSL meta text
                text = ''.join(t.text for t in r.xpath('.//w:t', namespaces=self.namespaces))
                if r.xpath('.//w:rPr/w:rStyle[@w:val="%s"]' % self.style, namespaces=self.namespaces):
                    xsl += text
                    if xsl_r is None and text:
                        xsl_r = r
                    else:
                        r.getparent().remove(r)
                elif text:
                    xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl)
            xsl_r, xsl = append_xsl_elements(xsl_elements, xsl_r, xsl)
        self._xsl_elements = xsl_elements
    return self._xsl_elements |
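The run-merging pass above can be illustrated without any docx or lxml machinery: consecutive runs carrying the XSL style are concatenated into one element, and any intervening non-empty plain run flushes the buffer. A pure-Python analogue of that pattern (the run data below is invented for illustration and is not the real document model):

```python
# Pure-Python analogue of the merge pass; not the real lxml/docx code.
runs = [("xsl", "<xsl:value-of "), ("xsl", "select='name'/>"),
        ("text", "Hello"), ("xsl", "<xsl:if test='x'>")]

elements, buffer = [], ""
for style, text in runs:
    if style == "xsl":
        buffer += text            # accumulate consecutive styled runs
    elif text:
        if buffer:
            elements.append(buffer)
        buffer = ""               # a plain run flushes the current element
if buffer:
    elements.append(buffer)       # trailing flush, like the call after the inner loop above

assert elements == ["<xsl:value-of select='name'/>", "<xsl:if test='x'>"]
```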
def set_updated(self):
    """
    Mark the module as updated.
    We check if the actual content has changed and if so we trigger an
    update in py3status.
    """
    # get latest output
    output = []
    for method in self.methods.values():
        data = method["last_output"]
        if isinstance(data, list):
            if self.testing and data:
                data[0]["cached_until"] = method.get("cached_until")
            output.extend(data)
        else:
            # if the output is not 'valid' then don't add it.
            if data.get("full_text") or "separator" in data:
                if self.testing:
                    data["cached_until"] = method.get("cached_until")
                output.append(data)
    # if changed, store and force display update.
    if output != self.last_output:
        # has the module's output become urgent?
        # we only care about the update where this first happens,
        # not about any updates after that.
        urgent = True in [x.get("urgent") for x in output]
        if urgent != self.urgent:
            self.urgent = urgent
        else:
            urgent = False
        self.last_output = output
        self._py3_wrapper.notify_update(self.module_full_name, urgent) | Mark the module as updated.
We check if the actual content has changed and if so we trigger an
update in py3status. | Below is the instruction that describes the task:
### Input:
Mark the module as updated.
We check if the actual content has changed and if so we trigger an
update in py3status.
### Response:
def set_updated(self):
    """
    Mark the module as updated.
    We check if the actual content has changed and if so we trigger an
    update in py3status.
    """
    # get latest output
    output = []
    for method in self.methods.values():
        data = method["last_output"]
        if isinstance(data, list):
            if self.testing and data:
                data[0]["cached_until"] = method.get("cached_until")
            output.extend(data)
        else:
            # if the output is not 'valid' then don't add it.
            if data.get("full_text") or "separator" in data:
                if self.testing:
                    data["cached_until"] = method.get("cached_until")
                output.append(data)
    # if changed, store and force display update.
    if output != self.last_output:
        # has the module's output become urgent?
        # we only care about the update where this first happens,
        # not about any updates after that.
        urgent = True in [x.get("urgent") for x in output]
        if urgent != self.urgent:
            self.urgent = urgent
        else:
            urgent = False
        self.last_output = output
        self._py3_wrapper.notify_update(self.module_full_name, urgent) |
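A small illustration of the urgency check used above: the wrapper is notified as urgent only when at least one item of the composite output carries `"urgent": True` (the sample output dicts are invented):

```python
output = [
    {"full_text": "CPU 93%", "urgent": True},
    {"full_text": "Mem 41%"},
]
assert (True in [x.get("urgent") for x in output]) is True   # one urgent item flags the module

output = [{"full_text": "CPU 12%"}]
assert (True in [x.get("urgent") for x in output]) is False  # no urgent items, no flag
```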
def projScatter(lon, lat, **kwargs):
    """
    Create a scatter plot on HEALPix projected axes.
    Inputs: lon (deg), lat (deg)
    """
    hp.projscatter(lon, lat, lonlat=True, **kwargs) | Create a scatter plot on HEALPix projected axes.
Inputs: lon (deg), lat (deg) | Below is the instruction that describes the task:
### Input:
Create a scatter plot on HEALPix projected axes.
Inputs: lon (deg), lat (deg)
### Response:
def projScatter(lon, lat, **kwargs):
    """
    Create a scatter plot on HEALPix projected axes.
    Inputs: lon (deg), lat (deg)
    """
    hp.projscatter(lon, lat, lonlat=True, **kwargs) |
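A hypothetical usage sketch, assuming `hp` in the function above refers to the healpy package and that matplotlib is available. It calls `hp.projscatter` directly with the same arguments the wrapper forwards; the coordinate values are illustrative:

```python
import healpy as hp
import matplotlib.pyplot as plt
import numpy as np

hp.mollview(title="Object positions")              # set up HEALPix projected axes first
lon = np.array([10.0, 45.0, 120.0])                # degrees
lat = np.array([-30.0, 0.0, 60.0])                 # degrees
hp.projscatter(lon, lat, lonlat=True, marker="o")  # what projScatter forwards to
plt.show()
```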
async def set_custom_eq(self, target: str, value: str) -> None:
    """Set custom EQ settings."""
    params = {"settings": [{"target": target, "value": value}]}
    return await self.services["audio"]["setCustomEqualizerSettings"](params) | Set custom EQ settings. | Below is the instruction that describes the task:
### Input:
Set custom EQ settings.
### Response:
async def set_custom_eq(self, target: str, value: str) -> None:
    """Set custom EQ settings."""
    params = {"settings": [{"target": target, "value": value}]}
    return await self.services["audio"]["setCustomEqualizerSettings"](params) |
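A hypothetical call site for the coroutine above. The `target` and `value` strings are illustrative placeholders, not verified equalizer identifiers, and `device` stands in for an object exposing `set_custom_eq` as defined above:

```python
import asyncio

async def boost_band(device):
    # `device` is assumed to expose set_custom_eq as defined above.
    await device.set_custom_eq(target="100HzBandLevel", value="6")

# asyncio.run(boost_band(device))  # run against a connected device instance
```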
def create_hit(MaxAssignments=None, AutoApprovalDelayInSeconds=None, LifetimeInSeconds=None, AssignmentDurationInSeconds=None, Reward=None, Title=None, Keywords=None, Description=None, Question=None, RequesterAnnotation=None, QualificationRequirements=None, UniqueRequestToken=None, AssignmentReviewPolicy=None, HITReviewPolicy=None, HITLayoutId=None, HITLayoutParameters=None):
"""
The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT , a new HIT is created for you, with a new HITTypeID . The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters .
See also: AWS API Documentation
:example: response = client.create_hit(
MaxAssignments=123,
AutoApprovalDelayInSeconds=123,
LifetimeInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
Question='string',
RequesterAnnotation='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
:type MaxAssignments: integer
:param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: [REQUIRED]
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: [REQUIRED]
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: [REQUIRED]
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: [REQUIRED]
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: [REQUIRED]
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type Question: string
:param Question: The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type QualificationRequirements: list
:param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
(dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
(integer) --
LocaleValues (list) --The locale value to compare against the Qualification's value. The local value must be a valid ISO 3166 country code or supports ISO 3166-2 subdivisions. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
(dict) --The Locale data structure represents a geographical region or location.
Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.
:type UniqueRequestToken: string
:param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return an AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
Note
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Policies.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITReviewPolicy: dict
:param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Policies.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITLayoutId: string
:param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
(dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
Name (string) --The name of the parameter in the HITLayout.
Value (string) --The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:return: {
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
:returns:
(integer) --
"""
pass | The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT , a new HIT is created for you, with a new HITTypeID . The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters .
See also: AWS API Documentation
:example: response = client.create_hit(
MaxAssignments=123,
AutoApprovalDelayInSeconds=123,
LifetimeInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
Question='string',
RequesterAnnotation='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
:type MaxAssignments: integer
:param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: [REQUIRED]
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: [REQUIRED]
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: [REQUIRED]
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: [REQUIRED]
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: [REQUIRED]
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type Question: string
:param Question: The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type QualificationRequirements: list
:param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
(dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
(integer) --
LocaleValues (list) --The locale value to compare against the Qualification's value. The local value must be a valid ISO 3166 country code or supports ISO 3166-2 subdivisions. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
(dict) --The Locale data structure represents a geographical region or location.
Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.
:type UniqueRequestToken: string
:param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return an AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
Note
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Policies.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITReviewPolicy: dict
:param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Policies.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITLayoutId: string
:param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
(dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
Name (string) --The name of the parameter in the HITLayout.
Value (string) --The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:return: {
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
:returns:
(integer) -- | Below is the instruction that describes the task:
### Input:
The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT , a new HIT is created for you, with a new HITTypeID . The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters .
See also: AWS API Documentation
:example: response = client.create_hit(
MaxAssignments=123,
AutoApprovalDelayInSeconds=123,
LifetimeInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
Question='string',
RequesterAnnotation='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
:type MaxAssignments: integer
:param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: [REQUIRED]
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: [REQUIRED]
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: [REQUIRED]
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: [REQUIRED]
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: [REQUIRED]
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type Question: string
:param Question: The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type QualificationRequirements: list
:param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
(dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
(integer) --
LocaleValues (list) --The locale value to compare against the Qualification's value. The local value must be a valid ISO 3166 country code or supports ISO 3166-2 subdivisions. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
(dict) --The Locale data structure represents a geographical region or location.
Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.
:type UniqueRequestToken: string
:param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return an AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
Note
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Policies.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITReviewPolicy: dict
:param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Policies.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITLayoutId: string
:param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
(dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
Name (string) --The name of the parameter in the HITLayout.
Value (string) --The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:return: {
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
:returns:
(integer) --
### Response:
def create_hit(MaxAssignments=None, AutoApprovalDelayInSeconds=None, LifetimeInSeconds=None, AssignmentDurationInSeconds=None, Reward=None, Title=None, Keywords=None, Description=None, Question=None, RequesterAnnotation=None, QualificationRequirements=None, UniqueRequestToken=None, AssignmentReviewPolicy=None, HITReviewPolicy=None, HITLayoutId=None, HITLayoutParameters=None):
"""
The CreateHIT operation creates a new Human Intelligence Task (HIT). The new HIT is made available for Workers to find and accept on the Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for the properties of the HIT, such as its title, reward amount and number of assignments. When you pass these values to CreateHIT , a new HIT is created for you, with a new HITTypeID . The HITTypeID can be used to create additional HITs in the future without needing to specify common parameters such as the title, description and reward amount each time.
An alternative way to create HITs is to first generate a HITTypeID using the CreateHITType operation and then call the CreateHITWithHITType operation. This is the recommended best practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by providing a value for the Question parameter that fully specifies the contents of the HIT, or by providing a HitLayoutId and associated HitLayoutParameters .
See also: AWS API Documentation
:example: response = client.create_hit(
MaxAssignments=123,
AutoApprovalDelayInSeconds=123,
LifetimeInSeconds=123,
AssignmentDurationInSeconds=123,
Reward='string',
Title='string',
Keywords='string',
Description='string',
Question='string',
RequesterAnnotation='string',
QualificationRequirements=[
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
UniqueRequestToken='string',
AssignmentReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITReviewPolicy={
'PolicyName': 'string',
'Parameters': [
{
'Key': 'string',
'Values': [
'string',
],
'MapEntries': [
{
'Key': 'string',
'Values': [
'string',
]
},
]
},
]
},
HITLayoutId='string',
HITLayoutParameters=[
{
'Name': 'string',
'Value': 'string'
},
]
)
:type MaxAssignments: integer
:param MaxAssignments: The number of times the HIT can be accepted and completed before the HIT becomes unavailable.
:type AutoApprovalDelayInSeconds: integer
:param AutoApprovalDelayInSeconds: The number of seconds after an assignment for the HIT has been submitted, after which the assignment is considered Approved automatically unless the Requester explicitly rejects it.
:type LifetimeInSeconds: integer
:param LifetimeInSeconds: [REQUIRED]
An amount of time, in seconds, after which the HIT is no longer available for users to accept. After the lifetime of the HIT elapses, the HIT no longer appears in HIT searches, even if not all of the assignments for the HIT have been accepted.
:type AssignmentDurationInSeconds: integer
:param AssignmentDurationInSeconds: [REQUIRED]
The amount of time, in seconds, that a Worker has to complete the HIT after accepting it. If a Worker does not complete the assignment within the specified duration, the assignment is considered abandoned. If the HIT is still active (that is, its lifetime has not elapsed), the assignment becomes available for other users to find and accept.
:type Reward: string
:param Reward: [REQUIRED]
The amount of money the Requester will pay a Worker for successfully completing the HIT.
:type Title: string
:param Title: [REQUIRED]
The title of the HIT. A title should be short and descriptive about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT title appears in search results, and everywhere the HIT is mentioned.
:type Keywords: string
:param Keywords: One or more words or phrases that describe the HIT, separated by commas. These words are used in searches to find HITs.
:type Description: string
:param Description: [REQUIRED]
A general description of the HIT. A description includes detailed information about the kind of task the HIT contains. On the Amazon Mechanical Turk web site, the HIT description appears in the expanded view of search results, and in the HIT and assignment screens. A good description gives the user enough information to evaluate the HIT before accepting it.
:type Question: string
:param Question: The data the person completing the HIT uses to produce the results.
Constraints: Must be a QuestionForm data structure, an ExternalQuestion data structure, or an HTMLQuestion data structure. The XML question data must not be larger than 64 kilobytes (65,535 bytes) in size, including whitespace.
Either a Question parameter or a HITLayoutId parameter must be provided.
:type RequesterAnnotation: string
:param RequesterAnnotation: An arbitrary data field. The RequesterAnnotation parameter lets your application attach arbitrary data to the HIT for tracking purposes. For example, this parameter could be an identifier internal to the Requester's application that corresponds with the HIT.
The RequesterAnnotation parameter for a HIT is only visible to the Requester who created the HIT. It is not shown to the Worker, or any other Requester.
The RequesterAnnotation parameter may be different for each HIT you submit. It does not affect how your HITs are grouped.
:type QualificationRequirements: list
:param QualificationRequirements: A condition that a Worker's Qualifications must meet before the Worker is allowed to accept and complete the HIT.
(dict) --The QualificationRequirement data structure describes a Qualification that a Worker must have before the Worker is allowed to accept a HIT. A requirement may optionally state that a Worker must have the Qualification in order to preview the HIT.
QualificationTypeId (string) -- [REQUIRED]The ID of the Qualification type for the requirement.
Comparator (string) -- [REQUIRED]The kind of comparison to make against a Qualification's value. You can compare a Qualification's value to an IntegerValue to see if it is LessThan, LessThanOrEqualTo, GreaterThan, GreaterThanOrEqualTo, EqualTo, or NotEqualTo the IntegerValue. You can compare it to a LocaleValue to see if it is EqualTo, or NotEqualTo the LocaleValue. You can check to see if the value is In or NotIn a set of IntegerValue or LocaleValue values. Lastly, a Qualification requirement can also test if a Qualification Exists or DoesNotExist in the user's profile, regardless of its value.
IntegerValues (list) --The integer value to compare against the Qualification's value. IntegerValue must not be present if Comparator is Exists or DoesNotExist. IntegerValue can only be used if the Qualification type has an integer value; it cannot be used with the Worker_Locale QualificationType ID. When performing a set comparison by using the In or the NotIn comparator, you can use up to 15 IntegerValue elements in a QualificationRequirement data structure.
(integer) --
LocaleValues (list) --The locale value to compare against the Qualification's value. The local value must be a valid ISO 3166 country code or supports ISO 3166-2 subdivisions. LocaleValue can only be used with a Worker_Locale QualificationType ID. LocaleValue can only be used with the EqualTo, NotEqualTo, In, and NotIn comparators. You must only use a single LocaleValue element when using the EqualTo or NotEqualTo comparators. When performing a set comparison by using the In or the NotIn comparator, you can use up to 30 LocaleValue elements in a QualificationRequirement data structure.
(dict) --The Locale data structure represents a geographical region or location.
Country (string) -- [REQUIRED]The country of the locale. Must be a valid ISO 3166 country code. For example, the code US refers to the United States of America.
Subdivision (string) --The state or subdivision of the locale. A valid ISO 3166-2 subdivision code. For example, the code WA refers to the state of Washington.
RequiredToPreview (boolean) --If true, the question data for the HIT will not be shown when a Worker whose Qualifications do not meet this requirement tries to preview the HIT. That is, a Worker's Qualifications must meet all of the requirements for which RequiredToPreview is true in order to preview the HIT. If a Worker meets all of the requirements where RequiredToPreview is true (or if there are no such requirements), but does not meet all of the requirements for the HIT, the Worker will be allowed to preview the HIT's question data, but will not be allowed to accept and complete the HIT. The default is false.
:type UniqueRequestToken: string
:param UniqueRequestToken: A unique identifier for this request which allows you to retry the call on error without creating duplicate HITs. This is useful in cases such as network timeouts where it is unclear whether or not the call succeeded on the server. If the HIT already exists in the system from a previous call using the same UniqueRequestToken, subsequent calls will return a AWS.MechanicalTurk.HitAlreadyExists error with a message containing the HITId.
Note
Note: It is your responsibility to ensure uniqueness of the token. The unique token expires after 24 hours. Subsequent calls using the same UniqueRequestToken made after the 24 hour limit could create duplicate HITs.
:type AssignmentReviewPolicy: dict
:param AssignmentReviewPolicy: The Assignment-level Review Policy applies to the assignments under the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Polices.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITReviewPolicy: dict
:param HITReviewPolicy: The HIT-level Review Policy applies to the HIT. You can specify for Mechanical Turk to take various actions based on the policy.
PolicyName (string) --Name of a Review Policy: SimplePlurality/2011-09-01 or ScoreMyKnownAnswers/2011-09-01
Parameters (list) --Name of the parameter from the Review policy.
(dict) --Name of the parameter from the Review policy.
Key (string) --Name of the parameter from the list of Review Polices.
Values (list) --The list of values of the Parameter
(string) --
MapEntries (list) --List of ParameterMapEntry objects.
(dict) --This data structure is the data type for the AnswerKey parameter of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Key (string) --The QuestionID from the HIT that is used to identify which question requires Mechanical Turk to score as part of the ScoreMyKnownAnswers/2011-09-01 Review Policy.
Values (list) --The list of answers to the question specified in the MapEntry Key element. The Worker must match all values in order for the answer to be scored correctly.
(string) --
:type HITLayoutId: string
:param HITLayoutId: The HITLayoutId allows you to use a pre-existing HIT design with placeholder values and create an additional HIT by providing those values as HITLayoutParameters.
Constraints: Either a Question parameter or a HITLayoutId parameter must be provided.
:type HITLayoutParameters: list
:param HITLayoutParameters: If the HITLayoutId is provided, any placeholder values must be filled in with values using the HITLayoutParameter structure. For more information, see HITLayout.
(dict) --The HITLayoutParameter data structure defines parameter values used with a HITLayout. A HITLayout is a reusable Amazon Mechanical Turk project template used to provide Human Intelligence Task (HIT) question data for CreateHIT.
Name (string) --The name of the parameter in the HITLayout.
Value (string) --The value substituted for the parameter referenced in the HITLayout.
:rtype: dict
:return: {
'HIT': {
'HITId': 'string',
'HITTypeId': 'string',
'HITGroupId': 'string',
'HITLayoutId': 'string',
'CreationTime': datetime(2015, 1, 1),
'Title': 'string',
'Description': 'string',
'Question': 'string',
'Keywords': 'string',
'HITStatus': 'Assignable'|'Unassignable'|'Reviewable'|'Reviewing'|'Disposed',
'MaxAssignments': 123,
'Reward': 'string',
'AutoApprovalDelayInSeconds': 123,
'Expiration': datetime(2015, 1, 1),
'AssignmentDurationInSeconds': 123,
'RequesterAnnotation': 'string',
'QualificationRequirements': [
{
'QualificationTypeId': 'string',
'Comparator': 'LessThan'|'LessThanOrEqualTo'|'GreaterThan'|'GreaterThanOrEqualTo'|'EqualTo'|'NotEqualTo'|'Exists'|'DoesNotExist'|'In'|'NotIn',
'IntegerValues': [
123,
],
'LocaleValues': [
{
'Country': 'string',
'Subdivision': 'string'
},
],
'RequiredToPreview': True|False
},
],
'HITReviewStatus': 'NotReviewed'|'MarkedForReview'|'ReviewedAppropriate'|'ReviewedInappropriate',
'NumberOfAssignmentsPending': 123,
'NumberOfAssignmentsAvailable': 123,
'NumberOfAssignmentsCompleted': 123
}
}
:returns:
(integer) --
"""
pass |
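A minimal usage sketch for the API documented above; it assumes valid Requester credentials, the MTurk sandbox endpoint, and a placeholder question.xml containing QuestionForm/ExternalQuestion XML.
import boto3

client = boto3.client(
    'mturk',
    endpoint_url='https://mturk-requester-sandbox.us-east-1.amazonaws.com')
with open('question.xml') as fh:   # placeholder question XML (ExternalQuestion or QuestionForm)
    question_xml = fh.read()
response = client.create_hit(
    LifetimeInSeconds=3600,
    AssignmentDurationInSeconds=600,
    Reward='0.05',
    Title='Answer a short survey',
    Description='Answer five quick questions about a web page',
    Question=question_xml)
print(response['HIT']['HITId'])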
def register_service(cls, primary_key_type):
"""Register an API service endpoint.
:param cls: The class to register
:param str primary_key_type: The type (as a string) of the primary_key
field
"""
view_func = cls.as_view(cls.__name__.lower()) # pylint: disable=no-member
methods = set(cls.__model__.__methods__) # pylint: disable=no-member
if 'GET' in methods: # pylint: disable=no-member
current_app.add_url_rule(
cls.__model__.__url__ + '/', defaults={'resource_id': None},
view_func=view_func,
methods=['GET'])
current_app.add_url_rule(
'{resource}/meta'.format(resource=cls.__model__.__url__),
view_func=view_func,
methods=['GET'])
if 'POST' in methods: # pylint: disable=no-member
current_app.add_url_rule(
cls.__model__.__url__ + '/', view_func=view_func, methods=['POST', ])
current_app.add_url_rule(
'{resource}/<{pk_type}:{pk}>'.format(
resource=cls.__model__.__url__,
pk='resource_id', pk_type=primary_key_type),
view_func=view_func,
methods=methods - {'POST'})
current_app.classes.append(cls) | Register an API service endpoint.
:param cls: The class to register
:param str primary_key_type: The type (as a string) of the primary_key
field | Below is the the instruction that describes the task:
### Input:
Register an API service endpoint.
:param cls: The class to register
:param str primary_key_type: The type (as a string) of the primary_key
field
### Response:
def register_service(cls, primary_key_type):
"""Register an API service endpoint.
:param cls: The class to register
:param str primary_key_type: The type (as a string) of the primary_key
field
"""
view_func = cls.as_view(cls.__name__.lower()) # pylint: disable=no-member
methods = set(cls.__model__.__methods__) # pylint: disable=no-member
if 'GET' in methods: # pylint: disable=no-member
current_app.add_url_rule(
cls.__model__.__url__ + '/', defaults={'resource_id': None},
view_func=view_func,
methods=['GET'])
current_app.add_url_rule(
'{resource}/meta'.format(resource=cls.__model__.__url__),
view_func=view_func,
methods=['GET'])
if 'POST' in methods: # pylint: disable=no-member
current_app.add_url_rule(
cls.__model__.__url__ + '/', view_func=view_func, methods=['POST', ])
current_app.add_url_rule(
'{resource}/<{pk_type}:{pk}>'.format(
resource=cls.__model__.__url__,
pk='resource_id', pk_type=primary_key_type),
view_func=view_func,
methods=methods - {'POST'})
current_app.classes.append(cls) |
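A hypothetical usage sketch for the helper above; 'app' and 'UserService' (a view class whose __model__ defines __url__ and __methods__) are assumed to exist in a sandman-style application.
# Hypothetical names: 'app' is the Flask app, 'UserService' the service class.
with app.app_context():
    register_service(UserService, primary_key_type='int')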
def cat_extract(tar, member, targetpath):
"""Extract a regular file member using cat for async-like I/O
Mostly adapted from tarfile.py.
"""
assert member.isreg()
# Fetch the TarInfo object for the given name and build the
# destination pathname, replacing forward slashes to platform
# specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
try:
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
except EnvironmentError as e:
if e.errno == errno.EEXIST:
# Ignore an error caused by the race of
# the directory being created between the
# check for the path and the creation.
pass
else:
raise
with files.DeleteOnError(targetpath) as dest:
with pipeline.get_cat_pipeline(pipeline.PIPE, dest.f) as pl:
fp = tar.extractfile(member)
copyfileobj.copyfileobj(fp, pl.stdin)
if sys.version_info < (3, 5):
tar.chown(member, targetpath)
else:
tar.chown(member, targetpath, False)
tar.chmod(member, targetpath)
tar.utime(member, targetpath) | Extract a regular file member using cat for async-like I/O
Mostly adapted from tarfile.py. | Below is the the instruction that describes the task:
### Input:
Extract a regular file member using cat for async-like I/O
Mostly adapted from tarfile.py.
### Response:
def cat_extract(tar, member, targetpath):
"""Extract a regular file member using cat for async-like I/O
Mostly adapted from tarfile.py.
"""
assert member.isreg()
# Fetch the TarInfo object for the given name and build the
# destination pathname, replacing forward slashes to platform
# specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
try:
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
except EnvironmentError as e:
if e.errno == errno.EEXIST:
# Ignore an error caused by the race of
# the directory being created between the
# check for the path and the creation.
pass
else:
raise
with files.DeleteOnError(targetpath) as dest:
with pipeline.get_cat_pipeline(pipeline.PIPE, dest.f) as pl:
fp = tar.extractfile(member)
copyfileobj.copyfileobj(fp, pl.stdin)
if sys.version_info < (3, 5):
tar.chown(member, targetpath)
else:
tar.chown(member, targetpath, False)
tar.chmod(member, targetpath)
tar.utime(member, targetpath) |
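A hedged usage sketch: extracting every regular file from a tar archive with the helper above (archive path and destination directory are placeholders).
import os
import tarfile

with tarfile.open('base_backup.tar') as tar:   # placeholder archive
    for member in tar.getmembers():
        if member.isreg():
            cat_extract(tar, member, os.path.join('/restore', member.name))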
def _merge_constraints(constraints, overrides):
"""Merge the constraints avoiding duplicates
Change constraints in place.
"""
for o in overrides:
i = 0
while i < len(constraints):
c = constraints[i]
if _same(o, c):
constraints[i].update(o)
break
i = i + 1 | Merge the constraints avoiding duplicates
Change constraints in place. | Below is the the instruction that describes the task:
### Input:
Merge the constraints avoiding duplicates
Change constraints in place.
### Response:
def _merge_constraints(constraints, overrides):
"""Merge the constraints avoiding duplicates
Change constraints in place.
"""
for o in overrides:
i = 0
while i < len(constraints):
c = constraints[i]
if _same(o, c):
constraints[i].update(o)
break
i = i + 1 |
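A minimal sketch of the in-place merge, assuming _same() treats two constraints with the same 'name' as matching.
constraints = [{'name': 'cpu', 'max': 2}, {'name': 'mem', 'max': 512}]
overrides = [{'name': 'cpu', 'max': 4}]
_merge_constraints(constraints, overrides)
# constraints[0] is now {'name': 'cpu', 'max': 4}; the 'mem' constraint is untouched.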
def densenet121(num_classes=1000, pretrained='imagenet'):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model = models.densenet121(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['densenet121'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_densenets(model)
    return model | Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` | Below is the the instruction that describes the task:
### Input:
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
### Response:
def densenet121(num_classes=1000, pretrained='imagenet'):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
"""
model = models.densenet121(pretrained=False)
if pretrained is not None:
settings = pretrained_settings['densenet121'][pretrained]
model = load_pretrained(model, num_classes, settings)
model = modify_densenets(model)
return model |
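A hypothetical usage sketch (assumes torch, torchvision and the pretrained weight files are available).
import torch

model = densenet121(num_classes=1000, pretrained='imagenet')
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))   # dummy batch of one RGB image
print(logits.shape)   # torch.Size([1, 1000])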
def get_preferred_partition(self, broker, sibling_distance):
"""The preferred partition belongs to the topic with the minimum
(also negative) distance between destination and source.
:param broker: Destination broker
    :param sibling_distance: dict {topic: distance}; a negative distance
    means that the destination broker has fewer partitions of a certain topic
    than the source broker (self).
:returns: A partition or None if no eligible partitions are available
"""
# Only partitions not having replica in broker are valid
# Get best fit partition, based on avoiding partition from same topic
# and partition with least siblings in destination-broker.
eligible_partitions = self.partitions - broker.partitions
if eligible_partitions:
pref_partition = min(
eligible_partitions,
key=lambda source_partition:
sibling_distance[source_partition.topic],
)
return pref_partition
else:
return None | The preferred partition belongs to the topic with the minimum
(also negative) distance between destination and source.
:param broker: Destination broker
:param sibling_distance: dict {topic: distance}; a negative distance
means that the destination broker has fewer partitions of a certain topic
than the source broker (self).
:returns: A partition or None if no eligible partitions are available | Below is the the instruction that describes the task:
### Input:
The preferred partition belongs to the topic with the minimum
(also negative) distance between destination and source.
:param broker: Destination broker
:param sibling_distance: dict {topic: distance}; a negative distance
means that the destination broker has fewer partitions of a certain topic
than the source broker (self).
:returns: A partition or None if no eligible partitions are available
### Response:
def get_preferred_partition(self, broker, sibling_distance):
"""The preferred partition belongs to the topic with the minimum
(also negative) distance between destination and source.
:param broker: Destination broker
    :param sibling_distance: dict {topic: distance}; a negative distance
    means that the destination broker has fewer partitions of a certain topic
    than the source broker (self).
:returns: A partition or None if no eligible partitions are available
"""
# Only partitions not having replica in broker are valid
# Get best fit partition, based on avoiding partition from same topic
# and partition with least siblings in destination-broker.
eligible_partitions = self.partitions - broker.partitions
if eligible_partitions:
pref_partition = min(
eligible_partitions,
key=lambda source_partition:
sibling_distance[source_partition.topic],
)
return pref_partition
else:
return None |
def audit(**kwargs):
"""
use this decorator to audit an operation
"""
def wrap(fn):
@functools.wraps(fn)
def advice(parent_object, *args, **kw):
request = parent_object.request
wijziging = request.audit_manager.create_revision()
result = fn(parent_object, *args, **kw)
if hasattr(request, 'user') and request.user is not None and 'actor' in request.user:
actor = request.user['actor']
attributes = request.user['attributes']
wijziging.updated_by = actor.get('uri', None)
if actor.get('uri') == actor.get('instantie_actor_uri'):
wijziging.updated_by_omschrijving = (
attributes.get('displayname')
or attributes.get('mail')
or actor.get('omschrijving'))
else:
wijziging.updated_by_omschrijving = actor.get(
'omschrijving')
else:
wijziging.updated_by = 'publiek'
wijziging.updated_by_omschrijving = 'publiek'
r_id = request.matchdict.get('id')
wijziging.resource_object_id = r_id
if result is not None:
try:
renderer_name = request.registry.settings.get(
'audit.pyramid.json.renderer',
'jsonrenderer')
json_string = renderers.render(renderer_name, result,
request=request)
result_object_json = json.loads(json_string)
wijziging.resource_object_json = result_object_json
wijziging.resource_object_id = _get_id_from_result(r_id, result_object_json, kwargs)
except Exception as e:
log.error(e)
wijziging.versie = _get_versie_hash(wijziging)
wijziging.actie = kwargs.get('actie') if kwargs.get('actie') else _action_from_request(request)
request.audit_manager.save(wijziging)
return result
return advice
return wrap | use this decorator to audit an operation | Below is the the instruction that describes the task:
### Input:
use this decorator to audit an operation
### Response:
def audit(**kwargs):
"""
use this decorator to audit an operation
"""
def wrap(fn):
@functools.wraps(fn)
def advice(parent_object, *args, **kw):
request = parent_object.request
wijziging = request.audit_manager.create_revision()
result = fn(parent_object, *args, **kw)
if hasattr(request, 'user') and request.user is not None and 'actor' in request.user:
actor = request.user['actor']
attributes = request.user['attributes']
wijziging.updated_by = actor.get('uri', None)
if actor.get('uri') == actor.get('instantie_actor_uri'):
wijziging.updated_by_omschrijving = (
attributes.get('displayname')
or attributes.get('mail')
or actor.get('omschrijving'))
else:
wijziging.updated_by_omschrijving = actor.get(
'omschrijving')
else:
wijziging.updated_by = 'publiek'
wijziging.updated_by_omschrijving = 'publiek'
r_id = request.matchdict.get('id')
wijziging.resource_object_id = r_id
if result is not None:
try:
renderer_name = request.registry.settings.get(
'audit.pyramid.json.renderer',
'jsonrenderer')
json_string = renderers.render(renderer_name, result,
request=request)
result_object_json = json.loads(json_string)
wijziging.resource_object_json = result_object_json
wijziging.resource_object_id = _get_id_from_result(r_id, result_object_json, kwargs)
except Exception as e:
log.error(e)
wijziging.versie = _get_versie_hash(wijziging)
wijziging.actie = kwargs.get('actie') if kwargs.get('actie') else _action_from_request(request)
request.audit_manager.save(wijziging)
return result
return advice
return wrap |
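A hypothetical usage sketch of the decorator on a Pyramid view class; the class name, method and returned payload are placeholders.
class DossierView(object):
    def __init__(self, request):
        self.request = request

    @audit(actie='update')
    def update_dossier(self):
        # perform the update, then return a JSON-serialisable result
        return {'id': self.request.matchdict.get('id'), 'status': 'updated'}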
def reset_all_to_coefficients(self):
""" Resets the IOSystem and all extensions to coefficients.
This method calls reset_to_coefficients for the IOSystem and for
all Extensions in the system
Note
-----
    The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
"""
self.reset_to_coefficients()
[ee.reset_to_coefficients() for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset full system to coefficients")
return self | Resets the IOSystem and all extensions to coefficients.
This method calls reset_to_coefficients for the IOSystem and for
all Extensions in the system
Note
-----
The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary. | Below is the the instruction that describes the task:
### Input:
Resets the IOSystem and all extensions to coefficients.
This method calls reset_to_coefficients for the IOSystem and for
all Extensions in the system
Note
-----
The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
### Response:
def reset_all_to_coefficients(self):
""" Resets the IOSystem and all extensions to coefficients.
This method calls reset_to_coefficients for the IOSystem and for
all Extensions in the system
Note
-----
    The system cannot be reconstructed after this step
because all absolute data is removed. Save the Y data in case
a reconstruction might be necessary.
"""
self.reset_to_coefficients()
[ee.reset_to_coefficients() for ee in self.get_extensions(data=True)]
self.meta._add_modify("Reset full system to coefficients")
return self |
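A hedged usage sketch with pymrio's bundled test system.
import pymrio

io = pymrio.load_test()
io.calc_all()
io.reset_all_to_coefficients()   # absolute flows are removed; keep Y beforehand if you need to rebuild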
def angle(self, center1_x, center1_y, center2_x, center2_y):
"""
compute the rotation angle of the dipole
:return:
"""
phi_G = np.arctan2(center2_y - center1_y, center2_x - center1_x)
return phi_G | compute the rotation angle of the dipole
:return: | Below is the the instruction that describes the task:
### Input:
compute the rotation angle of the dipole
:return:
### Response:
def angle(self, center1_x, center1_y, center2_x, center2_y):
"""
compute the rotation angle of the dipole
:return:
"""
phi_G = np.arctan2(center2_y - center1_y, center2_x - center1_x)
return phi_G |
def stream_via(self, reactor, host, port,
socks_endpoint,
use_tls=False):
"""
This returns an `IStreamClientEndpoint`_ that will connect to
the given ``host``, ``port`` via Tor -- and via this
    particular circuit.
We match the streams up using their source-ports, so even if
there are many streams in-flight to the same destination they
will align correctly. For example, to cause a stream to go to
``torproject.org:443`` via a particular circuit::
@inlineCallbacks
def main(reactor):
circ = yield torstate.build_circuit() # lets Tor decide the path
yield circ.when_built()
tor_ep = circ.stream_via(reactor, 'torproject.org', 443)
# 'factory' is for your protocol
proto = yield tor_ep.connect(factory)
Note that if you're doing client-side Web requests, you
probably want to use `treq
<http://treq.readthedocs.org/en/latest/>`_ or ``Agent``
directly so call :meth:`txtorcon.Circuit.web_agent` instead.
:param socks_endpoint: should be a Deferred firing a valid
IStreamClientEndpoint pointing at a Tor SOCKS port (or an
IStreamClientEndpoint already).
.. _istreamclientendpoint: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamClientEndpoint.html
"""
from .endpoints import TorClientEndpoint
ep = TorClientEndpoint(
host, port, socks_endpoint,
tls=use_tls,
reactor=reactor,
)
return TorCircuitEndpoint(reactor, self._torstate, self, ep) | This returns an `IStreamClientEndpoint`_ that will connect to
the given ``host``, ``port`` via Tor -- and via this
particular circuit.
We match the streams up using their source-ports, so even if
there are many streams in-flight to the same destination they
will align correctly. For example, to cause a stream to go to
``torproject.org:443`` via a particular circuit::
@inlineCallbacks
def main(reactor):
circ = yield torstate.build_circuit() # lets Tor decide the path
yield circ.when_built()
tor_ep = circ.stream_via(reactor, 'torproject.org', 443)
# 'factory' is for your protocol
proto = yield tor_ep.connect(factory)
Note that if you're doing client-side Web requests, you
probably want to use `treq
<http://treq.readthedocs.org/en/latest/>`_ or ``Agent``
directly so call :meth:`txtorcon.Circuit.web_agent` instead.
:param socks_endpoint: should be a Deferred firing a valid
IStreamClientEndpoint pointing at a Tor SOCKS port (or an
IStreamClientEndpoint already).
.. _istreamclientendpoint: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamClientEndpoint.html | Below is the the instruction that describes the task:
### Input:
This returns an `IStreamClientEndpoint`_ that will connect to
the given ``host``, ``port`` via Tor -- and via this
particular circuit.
We match the streams up using their source-ports, so even if
there are many streams in-flight to the same destination they
will align correctly. For example, to cause a stream to go to
``torproject.org:443`` via a particular circuit::
@inlineCallbacks
def main(reactor):
circ = yield torstate.build_circuit() # lets Tor decide the path
yield circ.when_built()
tor_ep = circ.stream_via(reactor, 'torproject.org', 443)
# 'factory' is for your protocol
proto = yield tor_ep.connect(factory)
Note that if you're doing client-side Web requests, you
probably want to use `treq
<http://treq.readthedocs.org/en/latest/>`_ or ``Agent``
directly so call :meth:`txtorcon.Circuit.web_agent` instead.
:param socks_endpoint: should be a Deferred firing a valid
IStreamClientEndpoint pointing at a Tor SOCKS port (or an
IStreamClientEndpoint already).
.. _istreamclientendpoint: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamClientEndpoint.html
### Response:
def stream_via(self, reactor, host, port,
socks_endpoint,
use_tls=False):
"""
This returns an `IStreamClientEndpoint`_ that will connect to
the given ``host``, ``port`` via Tor -- and via this
    particular circuit.
We match the streams up using their source-ports, so even if
there are many streams in-flight to the same destination they
will align correctly. For example, to cause a stream to go to
``torproject.org:443`` via a particular circuit::
@inlineCallbacks
def main(reactor):
circ = yield torstate.build_circuit() # lets Tor decide the path
yield circ.when_built()
tor_ep = circ.stream_via(reactor, 'torproject.org', 443)
# 'factory' is for your protocol
proto = yield tor_ep.connect(factory)
Note that if you're doing client-side Web requests, you
probably want to use `treq
<http://treq.readthedocs.org/en/latest/>`_ or ``Agent``
directly so call :meth:`txtorcon.Circuit.web_agent` instead.
:param socks_endpoint: should be a Deferred firing a valid
IStreamClientEndpoint pointing at a Tor SOCKS port (or an
IStreamClientEndpoint already).
.. _istreamclientendpoint: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamClientEndpoint.html
"""
from .endpoints import TorClientEndpoint
ep = TorClientEndpoint(
host, port, socks_endpoint,
tls=use_tls,
reactor=reactor,
)
return TorCircuitEndpoint(reactor, self._torstate, self, ep) |
def _get_logger_for_instance(self, instance: typing.Any) -> logging.Logger:
"""Get logger for log calls.
:param instance: Owner class instance. Filled only if instance created, else None.
:type instance: typing.Optional[owner]
:return: logger instance
:rtype: logging.Logger
"""
if self.logger is not None: # pylint: disable=no-else-return
return self.logger
elif hasattr(instance, "logger") and isinstance(instance.logger, logging.Logger):
return instance.logger
elif hasattr(instance, "log") and isinstance(instance.log, logging.Logger):
return instance.log
return _LOGGER | Get logger for log calls.
:param instance: Owner class instance. Filled only if instance created, else None.
:type instance: typing.Optional[owner]
:return: logger instance
:rtype: logging.Logger | Below is the the instruction that describes the task:
### Input:
Get logger for log calls.
:param instance: Owner class instance. Filled only if instance created, else None.
:type instance: typing.Optional[owner]
:return: logger instance
:rtype: logging.Logger
### Response:
def _get_logger_for_instance(self, instance: typing.Any) -> logging.Logger:
"""Get logger for log calls.
:param instance: Owner class instance. Filled only if instance created, else None.
:type instance: typing.Optional[owner]
:return: logger instance
:rtype: logging.Logger
"""
if self.logger is not None: # pylint: disable=no-else-return
return self.logger
elif hasattr(instance, "logger") and isinstance(instance.logger, logging.Logger):
return instance.logger
elif hasattr(instance, "log") and isinstance(instance.log, logging.Logger):
return instance.log
return _LOGGER |
def find_by_extension(extension):
"""
Find and return a format by extension.
:param extension: A string describing the extension of the format.
"""
for format in FORMATS:
if extension in format.extensions:
return format
raise UnknownFormat('No format found with extension "%s"' % extension) | Find and return a format by extension.
:param extension: A string describing the extension of the format. | Below is the the instruction that describes the task:
### Input:
Find and return a format by extension.
:param extension: A string describing the extension of the format.
### Response:
def find_by_extension(extension):
"""
Find and return a format by extension.
:param extension: A string describing the extension of the format.
"""
for format in FORMATS:
if extension in format.extensions:
return format
raise UnknownFormat('No format found with extension "%s"' % extension) |
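A minimal usage sketch, assuming FORMATS contains a format whose extensions include 'json'.
fmt = find_by_extension('json')
try:
    find_by_extension('unknown-ext')
except UnknownFormat as exc:
    print(exc)   # No format found with extension "unknown-ext"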
def tags(self):
"""
A dictionary that maps tag names to :class:`Revision` objects.
Here's an example based on a mirror of the git project's repository:
>>> from pprint import pprint
>>> from vcs_repo_mgr.backends.git import GitRepo
>>> repository = GitRepo(remote='https://github.com/git/git.git')
>>> pprint(repository.tags)
{'v0.99': Revision(repository=GitRepo(...),
tag='v0.99',
revision_id='d6602ec5194c87b0fc87103ca4d67251c76f233a'),
'v0.99.1': Revision(repository=GitRepo(...),
tag='v0.99.1',
revision_id='f25a265a342aed6041ab0cc484224d9ca54b6f41'),
'v0.99.2': Revision(repository=GitRepo(...),
tag='v0.99.2',
revision_id='c5db5456ae3b0873fc659c19fafdde22313cc441'),
..., # dozens of tags omitted to keep this example short
'v2.3.6': Revision(repository=GitRepo(...),
tag='v2.3.6',
revision_id='8e7304597727126cdc52771a9091d7075a70cc31'),
'v2.3.7': Revision(repository=GitRepo(...),
tag='v2.3.7',
revision_id='b17db4d9c966de30f5445632411c932150e2ad2f'),
'v2.4.0': Revision(repository=GitRepo(...),
tag='v2.4.0',
revision_id='67308bd628c6235dbc1bad60c9ad1f2d27d576cc')}
"""
# Make sure the local repository exists.
self.create()
# Create a mapping of tag names to revisions.
return dict((r.tag, r) for r in self.find_tags()) | A dictionary that maps tag names to :class:`Revision` objects.
Here's an example based on a mirror of the git project's repository:
>>> from pprint import pprint
>>> from vcs_repo_mgr.backends.git import GitRepo
>>> repository = GitRepo(remote='https://github.com/git/git.git')
>>> pprint(repository.tags)
{'v0.99': Revision(repository=GitRepo(...),
tag='v0.99',
revision_id='d6602ec5194c87b0fc87103ca4d67251c76f233a'),
'v0.99.1': Revision(repository=GitRepo(...),
tag='v0.99.1',
revision_id='f25a265a342aed6041ab0cc484224d9ca54b6f41'),
'v0.99.2': Revision(repository=GitRepo(...),
tag='v0.99.2',
revision_id='c5db5456ae3b0873fc659c19fafdde22313cc441'),
..., # dozens of tags omitted to keep this example short
'v2.3.6': Revision(repository=GitRepo(...),
tag='v2.3.6',
revision_id='8e7304597727126cdc52771a9091d7075a70cc31'),
'v2.3.7': Revision(repository=GitRepo(...),
tag='v2.3.7',
revision_id='b17db4d9c966de30f5445632411c932150e2ad2f'),
'v2.4.0': Revision(repository=GitRepo(...),
tag='v2.4.0',
revision_id='67308bd628c6235dbc1bad60c9ad1f2d27d576cc')} | Below is the the instruction that describes the task:
### Input:
A dictionary that maps tag names to :class:`Revision` objects.
Here's an example based on a mirror of the git project's repository:
>>> from pprint import pprint
>>> from vcs_repo_mgr.backends.git import GitRepo
>>> repository = GitRepo(remote='https://github.com/git/git.git')
>>> pprint(repository.tags)
{'v0.99': Revision(repository=GitRepo(...),
tag='v0.99',
revision_id='d6602ec5194c87b0fc87103ca4d67251c76f233a'),
'v0.99.1': Revision(repository=GitRepo(...),
tag='v0.99.1',
revision_id='f25a265a342aed6041ab0cc484224d9ca54b6f41'),
'v0.99.2': Revision(repository=GitRepo(...),
tag='v0.99.2',
revision_id='c5db5456ae3b0873fc659c19fafdde22313cc441'),
..., # dozens of tags omitted to keep this example short
'v2.3.6': Revision(repository=GitRepo(...),
tag='v2.3.6',
revision_id='8e7304597727126cdc52771a9091d7075a70cc31'),
'v2.3.7': Revision(repository=GitRepo(...),
tag='v2.3.7',
revision_id='b17db4d9c966de30f5445632411c932150e2ad2f'),
'v2.4.0': Revision(repository=GitRepo(...),
tag='v2.4.0',
revision_id='67308bd628c6235dbc1bad60c9ad1f2d27d576cc')}
### Response:
def tags(self):
"""
A dictionary that maps tag names to :class:`Revision` objects.
Here's an example based on a mirror of the git project's repository:
>>> from pprint import pprint
>>> from vcs_repo_mgr.backends.git import GitRepo
>>> repository = GitRepo(remote='https://github.com/git/git.git')
>>> pprint(repository.tags)
{'v0.99': Revision(repository=GitRepo(...),
tag='v0.99',
revision_id='d6602ec5194c87b0fc87103ca4d67251c76f233a'),
'v0.99.1': Revision(repository=GitRepo(...),
tag='v0.99.1',
revision_id='f25a265a342aed6041ab0cc484224d9ca54b6f41'),
'v0.99.2': Revision(repository=GitRepo(...),
tag='v0.99.2',
revision_id='c5db5456ae3b0873fc659c19fafdde22313cc441'),
..., # dozens of tags omitted to keep this example short
'v2.3.6': Revision(repository=GitRepo(...),
tag='v2.3.6',
revision_id='8e7304597727126cdc52771a9091d7075a70cc31'),
'v2.3.7': Revision(repository=GitRepo(...),
tag='v2.3.7',
revision_id='b17db4d9c966de30f5445632411c932150e2ad2f'),
'v2.4.0': Revision(repository=GitRepo(...),
tag='v2.4.0',
revision_id='67308bd628c6235dbc1bad60c9ad1f2d27d576cc')}
"""
# Make sure the local repository exists.
self.create()
# Create a mapping of tag names to revisions.
return dict((r.tag, r) for r in self.find_tags()) |
def print_block_num_row(block_num, cliques, next_cliques):
"""Print out a row of padding and a row with the block number. Includes
the branches prior to this block number."""
n_cliques = len(cliques)
if n_cliques == 0:
print('| {}'.format(block_num))
return
def mapper(clique):
block_id, _ = clique
if block_id not in next_cliques:
return ' '
return '|'
format_str = '{:<' + str(n_cliques * 2) + '} {}'
branches = list(map(mapper, cliques))
for end in ('', block_num):
print(format_str.format(' '.join(branches), end)) | Print out a row of padding and a row with the block number. Includes
the branches prior to this block number. | Below is the the instruction that describes the task:
### Input:
Print out a row of padding and a row with the block number. Includes
the branches prior to this block number.
### Response:
def print_block_num_row(block_num, cliques, next_cliques):
"""Print out a row of padding and a row with the block number. Includes
the branches prior to this block number."""
n_cliques = len(cliques)
if n_cliques == 0:
print('| {}'.format(block_num))
return
def mapper(clique):
block_id, _ = clique
if block_id not in next_cliques:
return ' '
return '|'
format_str = '{:<' + str(n_cliques * 2) + '} {}'
branches = list(map(mapper, cliques))
for end in ('', block_num):
print(format_str.format(' '.join(branches), end)) |
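A minimal sketch of the output: two cliques at block 41, of which only the first continues into the next block (block ids are placeholders).
cliques = [('block-id-a', None), ('block-id-b', None)]
print_block_num_row(41, cliques, next_cliques={'block-id-a'})
# prints a padding row of branch markers followed by a row ending in '41'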
def _ast_op_concat_to_code(self, opr, *, ignore_whitespace, **kwargs):
"""Convert an AST concatenate op to python source code."""
hoist_target = OP_CONCAT if ignore_whitespace else OP_WS_CONCAT
operands = self._hoist_operands(opr.operands, lambda t: isinstance(t, OptreeNode) and t.opnode.operator is hoist_target)
lines = ["concatenation(["]
for op in operands:
lines.extend(self._indent(self._ast_to_code(op, ignore_whitespace=ignore_whitespace)))
lines[-1] += ","
lines.append("], ignore_whitespace={})".format(bool(ignore_whitespace)))
return lines | Convert an AST concatenate op to python source code. | Below is the the instruction that describes the task:
### Input:
Convert an AST concatenate op to python source code.
### Response:
def _ast_op_concat_to_code(self, opr, *, ignore_whitespace, **kwargs):
"""Convert an AST concatenate op to python source code."""
hoist_target = OP_CONCAT if ignore_whitespace else OP_WS_CONCAT
operands = self._hoist_operands(opr.operands, lambda t: isinstance(t, OptreeNode) and t.opnode.operator is hoist_target)
lines = ["concatenation(["]
for op in operands:
lines.extend(self._indent(self._ast_to_code(op, ignore_whitespace=ignore_whitespace)))
lines[-1] += ","
lines.append("], ignore_whitespace={})".format(bool(ignore_whitespace)))
return lines |
def interpolate_xarray_linear(xpoints, ypoints, values, shape, chunks=CHUNK_SIZE):
"""Interpolate linearly, generating a dask array."""
from scipy.interpolate.interpnd import (LinearNDInterpolator,
_ndim_coords_from_arrays)
if isinstance(chunks, (list, tuple)):
vchunks, hchunks = chunks
else:
vchunks, hchunks = chunks, chunks
points = _ndim_coords_from_arrays(np.vstack((np.asarray(ypoints),
np.asarray(xpoints))).T)
interpolator = LinearNDInterpolator(points, values)
grid_x, grid_y = da.meshgrid(da.arange(shape[1], chunks=hchunks),
da.arange(shape[0], chunks=vchunks))
# workaround for non-thread-safe first call of the interpolator:
interpolator((0, 0))
res = da.map_blocks(intp, grid_x, grid_y, interpolator=interpolator)
return DataArray(res, dims=('y', 'x')) | Interpolate linearly, generating a dask array. | Below is the the instruction that describes the task:
### Input:
Interpolate linearly, generating a dask array.
### Response:
def interpolate_xarray_linear(xpoints, ypoints, values, shape, chunks=CHUNK_SIZE):
"""Interpolate linearly, generating a dask array."""
from scipy.interpolate.interpnd import (LinearNDInterpolator,
_ndim_coords_from_arrays)
if isinstance(chunks, (list, tuple)):
vchunks, hchunks = chunks
else:
vchunks, hchunks = chunks, chunks
points = _ndim_coords_from_arrays(np.vstack((np.asarray(ypoints),
np.asarray(xpoints))).T)
interpolator = LinearNDInterpolator(points, values)
grid_x, grid_y = da.meshgrid(da.arange(shape[1], chunks=hchunks),
da.arange(shape[0], chunks=vchunks))
# workaround for non-thread-safe first call of the interpolator:
interpolator((0, 0))
res = da.map_blocks(intp, grid_x, grid_y, interpolator=interpolator)
return DataArray(res, dims=('y', 'x')) |
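A hedged usage sketch: interpolating four corner tie points onto a 100x200 grid (the values are synthetic).
import numpy as np

lons = interpolate_xarray_linear(
    xpoints=np.array([0, 199, 0, 199]),
    ypoints=np.array([0, 0, 99, 99]),
    values=np.array([10.0, 20.0, 30.0, 40.0]),
    shape=(100, 200))
print(lons.dims)   # ('y', 'x')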
def run(arguments):
"""Main function for command line usage"""
parser = argparse.ArgumentParser(
description="Exports font icons as PNG images."
)
parser.add_argument(
'--list',
action='store_true',
help="list all available icon names and exit"
)
parser.add_argument(
'--download',
choices=[x for x in AVAILABLE_ICON_FONTS.keys()],
help="download latest icon font and exit"
)
required_group = parser.add_argument_group("required arguments")
required_group.add_argument(
'--ttf',
metavar='TTF-FILE',
type=open,
help='path to TTF file'
)
required_group.add_argument(
'--css',
metavar='CSS-FILE',
type=open,
help="path to CSS file"
)
exp_group = parser.add_argument_group("exporting icons")
exp_group.add_argument(
'icons',
type=str,
nargs='*',
help="names of the icons to export (or 'ALL' for all icons)"
)
exp_group.add_argument(
'--size',
type=int,
default=16,
help="icon size in pixels (default: 16)"
)
exp_group.add_argument(
'--scale',
type=str,
default='auto',
help="scaling factor between 0 and 1, or 'auto' for automatic scaling "
"(default: auto); be careful, as setting it may lead to icons "
"being cropped"
)
exp_group.add_argument(
'--color',
type=str,
default='black',
help="color name or hex value (default: black)"
)
exp_group.add_argument(
'--filename',
type=str,
help="name of the output file (without '.png' extension); "
"it's used as a prefix if multiple icons are exported"
)
exp_group.add_argument(
'--keep_prefix',
default=False,
action='store_true',
help="do not remove common icon prefix "
"(i.e. 'fa-arrow-right' instead of 'arrow-right')"
)
args = parser.parse_args(arguments)
# Parse '--download' argument first
if args.download:
downloader = download_icon_font(args.download, os.getcwd())
downloader.download_files()
print("Icon font '{name}' successfully downloaded".format(
name=args.download)
)
parser.exit()
    # If not '--download', then css and ttf files are required
if not args.css or not args.ttf:
parser.error("You have to provide CSS and TTF files")
icon_font = IconFont(css_file=args.css.name,
ttf_file=args.ttf.name,
keep_prefix=args.keep_prefix)
args.css.close()
args.ttf.close()
# Then '--list'
if args.list:
for icon in icon_font.css_icons.keys():
print(icon)
parser.exit()
# If not '--list' or '--download', parse passed icons
selected_icons = list()
if not args.icons:
parser.error("You have to pass at least one icon name")
elif args.icons == ['ALL']:
selected_icons = icon_font.css_icons.keys()
else:
for icon in args.icons:
if (args.keep_prefix and
not icon.startswith(icon_font.common_prefix)):
# Prepend icon name with prefix
icon = icon_font.common_prefix + icon
elif (not args.keep_prefix and
icon.startswith(icon_font.common_prefix)):
# Remove prefix from icon name
icon = icon[len(icon_font.common_prefix):]
# Check if given icon names exist
if icon in icon_font.css_icons:
selected_icons.append(icon)
else:
parser.error("Unknown icon name '{icon}'".format(icon=icon))
# Parse filename and remove the extension if necessary
given_filename = args.filename or ''
if given_filename.lower().endswith('.png'):
given_filename = given_filename[:-4]
# Some fonts have empty values
# (prefix only - which we remove - for common styles)
selected_icons = list(filter(None, selected_icons))
# Commence exporting
for icon in selected_icons:
if len(selected_icons) > 1:
# Multiple icons - treat the filename option as name prefix
filename = '{prefix}{icon}.png'.format(
prefix=given_filename, icon=icon,
)
else:
if given_filename:
# Use the specified filename
filename = given_filename + '.png'
else:
# Use icon name as filename
filename = str(icon) + '.png'
print("Exporting icon '{icon}' as '{filename}'"
"({size}x{size} pixels)".format(icon=icon,
filename=filename,
size=args.size))
icon_font.export_icon(icon=icon, filename=filename, size=args.size,
color=args.color, scale=args.scale)
print()
print("All done") | Main function for command line usage | Below is the the instruction that describes the task:
### Input:
Main function for command line usage
### Response:
def run(arguments):
"""Main function for command line usage"""
parser = argparse.ArgumentParser(
description="Exports font icons as PNG images."
)
parser.add_argument(
'--list',
action='store_true',
help="list all available icon names and exit"
)
parser.add_argument(
'--download',
choices=[x for x in AVAILABLE_ICON_FONTS.keys()],
help="download latest icon font and exit"
)
required_group = parser.add_argument_group("required arguments")
required_group.add_argument(
'--ttf',
metavar='TTF-FILE',
type=open,
help='path to TTF file'
)
required_group.add_argument(
'--css',
metavar='CSS-FILE',
type=open,
help="path to CSS file"
)
exp_group = parser.add_argument_group("exporting icons")
exp_group.add_argument(
'icons',
type=str,
nargs='*',
help="names of the icons to export (or 'ALL' for all icons)"
)
exp_group.add_argument(
'--size',
type=int,
default=16,
help="icon size in pixels (default: 16)"
)
exp_group.add_argument(
'--scale',
type=str,
default='auto',
help="scaling factor between 0 and 1, or 'auto' for automatic scaling "
"(default: auto); be careful, as setting it may lead to icons "
"being cropped"
)
exp_group.add_argument(
'--color',
type=str,
default='black',
help="color name or hex value (default: black)"
)
exp_group.add_argument(
'--filename',
type=str,
help="name of the output file (without '.png' extension); "
"it's used as a prefix if multiple icons are exported"
)
exp_group.add_argument(
'--keep_prefix',
default=False,
action='store_true',
help="do not remove common icon prefix "
"(i.e. 'fa-arrow-right' instead of 'arrow-right')"
)
args = parser.parse_args(arguments)
# Parse '--download' argument first
if args.download:
downloader = download_icon_font(args.download, os.getcwd())
downloader.download_files()
print("Icon font '{name}' successfully downloaded".format(
name=args.download)
)
parser.exit()
    # If not '--download', then css and ttf files are required
if not args.css or not args.ttf:
parser.error("You have to provide CSS and TTF files")
icon_font = IconFont(css_file=args.css.name,
ttf_file=args.ttf.name,
keep_prefix=args.keep_prefix)
args.css.close()
args.ttf.close()
# Then '--list'
if args.list:
for icon in icon_font.css_icons.keys():
print(icon)
parser.exit()
# If not '--list' or '--download', parse passed icons
selected_icons = list()
if not args.icons:
parser.error("You have to pass at least one icon name")
elif args.icons == ['ALL']:
selected_icons = icon_font.css_icons.keys()
else:
for icon in args.icons:
if (args.keep_prefix and
not icon.startswith(icon_font.common_prefix)):
# Prepend icon name with prefix
icon = icon_font.common_prefix + icon
elif (not args.keep_prefix and
icon.startswith(icon_font.common_prefix)):
# Remove prefix from icon name
icon = icon[len(icon_font.common_prefix):]
# Check if given icon names exist
if icon in icon_font.css_icons:
selected_icons.append(icon)
else:
parser.error("Unknown icon name '{icon}'".format(icon=icon))
# Parse filename and remove the extension if necessary
given_filename = args.filename or ''
if given_filename.lower().endswith('.png'):
given_filename = given_filename[:-4]
# Some fonts have empty values
# (prefix only - which we remove - for common styles)
selected_icons = list(filter(None, selected_icons))
# Commence exporting
for icon in selected_icons:
if len(selected_icons) > 1:
# Multiple icons - treat the filename option as name prefix
filename = '{prefix}{icon}.png'.format(
prefix=given_filename, icon=icon,
)
else:
if given_filename:
# Use the specified filename
filename = given_filename + '.png'
else:
# Use icon name as filename
filename = str(icon) + '.png'
print("Exporting icon '{icon}' as '{filename}'"
"({size}x{size} pixels)".format(icon=icon,
filename=filename,
size=args.size))
icon_font.export_icon(icon=icon, filename=filename, size=args.size,
color=args.color, scale=args.scale)
print()
print("All done") |
def confd_state_rest_listen_tcp_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
rest = ET.SubElement(confd_state, "rest")
listen = ET.SubElement(rest, "listen")
tcp = ET.SubElement(listen, "tcp")
port = ET.SubElement(tcp, "port")
port.text = kwargs.pop('port')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def confd_state_rest_listen_tcp_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
rest = ET.SubElement(confd_state, "rest")
listen = ET.SubElement(rest, "listen")
tcp = ET.SubElement(listen, "tcp")
port = ET.SubElement(tcp, "port")
port.text = kwargs.pop('port')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def moments(self):
"""The first two time delay weighted statistical moments of the
MA coefficients."""
moment1 = statstools.calc_mean_time(self.delays, self.coefs)
moment2 = statstools.calc_mean_time_deviation(
self.delays, self.coefs, moment1)
return numpy.array([moment1, moment2]) | The first two time delay weighted statistical moments of the
MA coefficients. | Below is the the instruction that describes the task:
### Input:
The first two time delay weighted statistical moments of the
MA coefficients.
### Response:
def moments(self):
"""The first two time delay weighted statistical moments of the
MA coefficients."""
moment1 = statstools.calc_mean_time(self.delays, self.coefs)
moment2 = statstools.calc_mean_time_deviation(
self.delays, self.coefs, moment1)
return numpy.array([moment1, moment2]) |
def interm_range_type(self) -> Sequence[str]:
'''The range type of each intermediate fluent in canonical order.
Returns:
Sequence[str]: A tuple of range types representing
the range of each fluent.
'''
fluents = self.domain.intermediate_fluents
ordering = self.domain.interm_fluent_ordering
return self._fluent_range_type(fluents, ordering) | The range type of each intermediate fluent in canonical order.
Returns:
Sequence[str]: A tuple of range types representing
the range of each fluent. | Below is the the instruction that describes the task:
### Input:
The range type of each intermediate fluent in canonical order.
Returns:
Sequence[str]: A tuple of range types representing
the range of each fluent.
### Response:
def interm_range_type(self) -> Sequence[str]:
'''The range type of each intermediate fluent in canonical order.
Returns:
Sequence[str]: A tuple of range types representing
the range of each fluent.
'''
fluents = self.domain.intermediate_fluents
ordering = self.domain.interm_fluent_ordering
return self._fluent_range_type(fluents, ordering) |
def _init_tag_params(self, tag, params):
"""
Alternative constructor used when the tag parameters are added to the
HTMLElement (HTMLElement(tag, params)).
    This method just creates a string and then passes it to the
:meth:`_init_tag`.
Args:
tag (str): HTML tag as string.
params (dict): HTML tag parameters as dictionary.
"""
self._element = tag
self.params = params
self._parseTagName()
self._istag = True
self._isendtag = False
self._isnonpairtag = False
self._element = self.tagToString() | Alternative constructor used when the tag parameters are added to the
HTMLElement (HTMLElement(tag, params)).
This method just creates a string and then passes it to the
:meth:`_init_tag`.
Args:
tag (str): HTML tag as string.
params (dict): HTML tag parameters as dictionary. | Below is the the instruction that describes the task:
### Input:
Alternative constructor used when the tag parameters are added to the
HTMLElement (HTMLElement(tag, params)).
This method just creates a string and then passes it to the
:meth:`_init_tag`.
Args:
tag (str): HTML tag as string.
params (dict): HTML tag parameters as dictionary.
### Response:
def _init_tag_params(self, tag, params):
"""
Alternative constructor used when the tag parameters are added to the
HTMLElement (HTMLElement(tag, params)).
    This method just creates a string and then passes it to the
:meth:`_init_tag`.
Args:
tag (str): HTML tag as string.
params (dict): HTML tag parameters as dictionary.
"""
self._element = tag
self.params = params
self._parseTagName()
self._istag = True
self._isendtag = False
self._isnonpairtag = False
self._element = self.tagToString() |
def checkPortIsOpen(remoteServerHost=ServerHost, port = Port):
'''
Checks if the specified port is open
:param remoteServerHost: the host address
:param port: port which needs to be checked
:return: ``True`` if port is open, ``False`` otherwise
'''
remoteServerIP = socket.gethostbyname(remoteServerHost)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((remoteServerIP, int(port)))
        sock.close()
        if result == 0:
            return True
        else:
            return False
except KeyboardInterrupt:
print("You pressed Ctrl+C")
sys.exit()
except socket.gaierror:
print('Hostname could not be resolved. Exiting')
sys.exit()
except socket.error:
print("Couldn't connect to server")
sys.exit() | Checks if the specified port is open
:param remoteServerHost: the host address
:param port: port which needs to be checked
:return: ``True`` if port is open, ``False`` otherwise | Below is the the instruction that describes the task:
### Input:
Checks if the specified port is open
:param remoteServerHost: the host address
:param port: port which needs to be checked
:return: ``True`` if port is open, ``False`` otherwise
### Response:
def checkPortIsOpen(remoteServerHost=ServerHost, port = Port):
'''
Checks if the specified port is open
:param remoteServerHost: the host address
:param port: port which needs to be checked
:return: ``True`` if port is open, ``False`` otherwise
'''
remoteServerIP = socket.gethostbyname(remoteServerHost)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((remoteServerIP, int(port)))
        sock.close()
        if result == 0:
            return True
        else:
            return False
except KeyboardInterrupt:
print("You pressed Ctrl+C")
sys.exit()
except socket.gaierror:
print('Hostname could not be resolved. Exiting')
sys.exit()
except socket.error:
print("Couldn't connect to server")
sys.exit() |
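A minimal usage sketch.
if checkPortIsOpen('localhost', 8080):
    print('port 8080 is open')
else:
    print('port 8080 is closed')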
def check_cmake_exists(cmake_command):
"""
    Check whether CMake is installed. If not, print an
    informative error message and exit.
"""
from subprocess import Popen, PIPE
p = Popen(
'{0} --version'.format(cmake_command),
shell=True,
stdin=PIPE,
stdout=PIPE)
if not ('cmake version' in p.communicate()[0].decode('UTF-8')):
sys.stderr.write(' This code is built using CMake\n\n')
sys.stderr.write(' CMake is not found\n')
sys.stderr.write(' get CMake at http://www.cmake.org/\n')
sys.stderr.write(' on many clusters CMake is installed\n')
sys.stderr.write(' but you have to load it first:\n')
sys.stderr.write(' $ module load cmake\n')
        sys.exit(1) | Check whether CMake is installed. If not, print an
informative error message and exit. | Below is the the instruction that describes the task:
### Input:
Check whether CMake is installed. If not, print an
informative error message and exit.
### Response:
def check_cmake_exists(cmake_command):
"""
Check whether CMake is installed. If not, print
informative error message and quits.
"""
from subprocess import Popen, PIPE
p = Popen(
'{0} --version'.format(cmake_command),
shell=True,
stdin=PIPE,
stdout=PIPE)
if not ('cmake version' in p.communicate()[0].decode('UTF-8')):
sys.stderr.write(' This code is built using CMake\n\n')
sys.stderr.write(' CMake is not found\n')
sys.stderr.write(' get CMake at http://www.cmake.org/\n')
sys.stderr.write(' on many clusters CMake is installed\n')
sys.stderr.write(' but you have to load it first:\n')
sys.stderr.write(' $ module load cmake\n')
sys.exit(1) |
def occurrence(self, file_name=None, path=None, date=None):
"""Add a file Occurrence.
Args:
file_name (str, optional): The file name for this occurrence.
path (str, optional): The file path for this occurrence.
date (str, optional): The datetime expression for this occurrence.
Returns:
obj: An instance of Occurrence.
"""
if self._indicator_data.get('type') != 'File':
# Indicator object has no logger to output warning
return None
occurrence_obj = FileOccurrence(file_name, path, date)
self._occurrences.append(occurrence_obj)
return occurrence_obj | Add a file Occurrence.
Args:
file_name (str, optional): The file name for this occurrence.
path (str, optional): The file path for this occurrence.
date (str, optional): The datetime expression for this occurrence.
Returns:
obj: An instance of Occurrence. | Below is the the instruction that describes the task:
### Input:
Add a file Occurrence.
Args:
file_name (str, optional): The file name for this occurrence.
path (str, optional): The file path for this occurrence.
date (str, optional): The datetime expression for this occurrence.
Returns:
obj: An instance of Occurrence.
### Response:
def occurrence(self, file_name=None, path=None, date=None):
"""Add a file Occurrence.
Args:
file_name (str, optional): The file name for this occurrence.
path (str, optional): The file path for this occurrence.
date (str, optional): The datetime expression for this occurrence.
Returns:
obj: An instance of Occurrence.
"""
if self._indicator_data.get('type') != 'File':
# Indicator object has no logger to output warning
return None
occurrence_obj = FileOccurrence(file_name, path, date)
self._occurrences.append(occurrence_obj)
return occurrence_obj |
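A hedged usage sketch for the method above; `file_indicator` stands in for an already-created File indicator object from the same SDK, and the argument values are placeholders.
# Hypothetical usage (file_indicator is assumed to be a File indicator instance)
occ = file_indicator.occurrence(
    file_name='dropper.exe',           # placeholder file name
    path='C:\\Users\\victim\\Temp',    # placeholder path
    date='2019-03-01T00:00:00Z',       # placeholder datetime expression
)
if occ is None:
    print('occurrences are only supported on File indicators')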
def _addPort(n: LNode, lp: LPort, intf: Interface,
reverseDirection=False):
"""
add port to LPort for interface
"""
origin = originObjOfPort(intf)
d = intf._direction
d = PortTypeFromDir(d)
if reverseDirection:
d = PortType.opposite(d)
new_lp = LPort(lp, d, lp.side, name=intf._name)
new_lp.originObj = origin
if intf._interfaces:
for child_intf in intf._interfaces:
_addPort(n, new_lp, child_intf,
reverseDirection=reverseDirection)
lp.children.append(new_lp)
new_lp.parent = lp
if n._node2lnode is not None:
n._node2lnode[origin] = new_lp
return new_lp | add port to LPort for interface | Below is the the instruction that describes the task:
### Input:
add port to LPort for interface
### Response:
def _addPort(n: LNode, lp: LPort, intf: Interface,
reverseDirection=False):
"""
add port to LPort for interface
"""
origin = originObjOfPort(intf)
d = intf._direction
d = PortTypeFromDir(d)
if reverseDirection:
d = PortType.opposite(d)
new_lp = LPort(lp, d, lp.side, name=intf._name)
new_lp.originObj = origin
if intf._interfaces:
for child_intf in intf._interfaces:
_addPort(n, new_lp, child_intf,
reverseDirection=reverseDirection)
lp.children.append(new_lp)
new_lp.parent = lp
if n._node2lnode is not None:
n._node2lnode[origin] = new_lp
return new_lp |
def periodic_callback(self):
"""Periodic cleanup tasks to maintain this adapter, should be called every second. """
if self.stopped:
return
# Check if we should start scanning again
if not self.scanning and len(self.connections.get_connections()) == 0:
self._logger.info("Restarting scan for devices")
self.start_scan(self._active_scan)
self._logger.info("Finished restarting scan for devices") | Periodic cleanup tasks to maintain this adapter, should be called every second. | Below is the the instruction that describes the task:
### Input:
Periodic cleanup tasks to maintain this adapter, should be called every second.
### Response:
def periodic_callback(self):
"""Periodic cleanup tasks to maintain this adapter, should be called every second. """
if self.stopped:
return
# Check if we should start scanning again
if not self.scanning and len(self.connections.get_connections()) == 0:
self._logger.info("Restarting scan for devices")
self.start_scan(self._active_scan)
self._logger.info("Finished restarting scan for devices") |
def atlas_find_missing_zonefile_availability( peer_table=None, con=None, path=None, missing_zonefile_info=None ):
"""
Find the set of missing zonefiles, as well as their popularity amongst
our neighbors.
Only consider zonefiles that are known by at least
one peer; otherwise they're missing from
    our clique (and we'll re-sync our neighbors' inventories
every so often to make sure we detect when zonefiles
become available).
Return a dict, structured as:
{
'zonefile hash': {
'names': [names],
'txid': first txid that set it,
'indexes': [...],
'popularity': ...,
'peers': [...],
'tried_storage': True|False
}
}
"""
# which zonefiles do we have?
bit_offset = 0
bit_count = 10000
missing = []
ret = {}
if missing_zonefile_info is None:
while True:
zfinfo = atlasdb_zonefile_find_missing( bit_offset, bit_count, con=con, path=path )
if len(zfinfo) == 0:
break
missing += zfinfo
bit_offset += len(zfinfo)
if len(missing) > 0:
log.debug("Missing %s zonefiles" % len(missing))
else:
missing = missing_zonefile_info
if len(missing) == 0:
# none!
return ret
with AtlasPeerTableLocked(peer_table) as ptbl:
# do any other peers have this zonefile?
for zfinfo in missing:
popularity = 0
byte_index = (zfinfo['inv_index'] - 1) / 8
bit_index = 7 - ((zfinfo['inv_index'] - 1) % 8)
peers = []
if not ret.has_key(zfinfo['zonefile_hash']):
ret[zfinfo['zonefile_hash']] = {
'names': [],
'txid': zfinfo['txid'],
'indexes': [],
'block_heights': [],
'popularity': 0,
'peers': [],
'tried_storage': False
}
for peer_hostport in ptbl.keys():
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
if len(peer_inv) <= byte_index:
# too new for this peer
continue
if (ord(peer_inv[byte_index]) & (1 << bit_index)) == 0:
# this peer doesn't have it
continue
if peer_hostport not in ret[zfinfo['zonefile_hash']]['peers']:
popularity += 1
peers.append( peer_hostport )
ret[zfinfo['zonefile_hash']]['names'].append( zfinfo['name'] )
ret[zfinfo['zonefile_hash']]['indexes'].append( zfinfo['inv_index']-1 )
ret[zfinfo['zonefile_hash']]['block_heights'].append( zfinfo['block_height'] )
ret[zfinfo['zonefile_hash']]['popularity'] += popularity
ret[zfinfo['zonefile_hash']]['peers'] += peers
ret[zfinfo['zonefile_hash']]['tried_storage'] = zfinfo['tried_storage']
return ret | Find the set of missing zonefiles, as well as their popularity amongst
our neighbors.
Only consider zonefiles that are known by at least
one peer; otherwise they're missing from
our clique (and we'll re-sync our neighbors' inventories
every so often to make sure we detect when zonefiles
become available).
Return a dict, structured as:
{
'zonefile hash': {
'names': [names],
'txid': first txid that set it,
'indexes': [...],
'popularity': ...,
'peers': [...],
'tried_storage': True|False
}
} | Below is the the instruction that describes the task:
### Input:
Find the set of missing zonefiles, as well as their popularity amongst
our neighbors.
Only consider zonefiles that are known by at least
one peer; otherwise they're missing from
our clique (and we'll re-sync our neighborss' inventories
every so often to make sure we detect when zonefiles
become available).
Return a dict, structured as:
{
'zonefile hash': {
'names': [names],
'txid': first txid that set it,
'indexes': [...],
'popularity': ...,
'peers': [...],
'tried_storage': True|False
}
}
### Response:
def atlas_find_missing_zonefile_availability( peer_table=None, con=None, path=None, missing_zonefile_info=None ):
"""
Find the set of missing zonefiles, as well as their popularity amongst
our neighbors.
Only consider zonefiles that are known by at least
one peer; otherwise they're missing from
    our clique (and we'll re-sync our neighbors' inventories
every so often to make sure we detect when zonefiles
become available).
Return a dict, structured as:
{
'zonefile hash': {
'names': [names],
'txid': first txid that set it,
'indexes': [...],
'popularity': ...,
'peers': [...],
'tried_storage': True|False
}
}
"""
# which zonefiles do we have?
bit_offset = 0
bit_count = 10000
missing = []
ret = {}
if missing_zonefile_info is None:
while True:
zfinfo = atlasdb_zonefile_find_missing( bit_offset, bit_count, con=con, path=path )
if len(zfinfo) == 0:
break
missing += zfinfo
bit_offset += len(zfinfo)
if len(missing) > 0:
log.debug("Missing %s zonefiles" % len(missing))
else:
missing = missing_zonefile_info
if len(missing) == 0:
# none!
return ret
with AtlasPeerTableLocked(peer_table) as ptbl:
# do any other peers have this zonefile?
for zfinfo in missing:
popularity = 0
byte_index = (zfinfo['inv_index'] - 1) / 8
bit_index = 7 - ((zfinfo['inv_index'] - 1) % 8)
peers = []
if not ret.has_key(zfinfo['zonefile_hash']):
ret[zfinfo['zonefile_hash']] = {
'names': [],
'txid': zfinfo['txid'],
'indexes': [],
'block_heights': [],
'popularity': 0,
'peers': [],
'tried_storage': False
}
for peer_hostport in ptbl.keys():
peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl )
if len(peer_inv) <= byte_index:
# too new for this peer
continue
if (ord(peer_inv[byte_index]) & (1 << bit_index)) == 0:
# this peer doesn't have it
continue
if peer_hostport not in ret[zfinfo['zonefile_hash']]['peers']:
popularity += 1
peers.append( peer_hostport )
ret[zfinfo['zonefile_hash']]['names'].append( zfinfo['name'] )
ret[zfinfo['zonefile_hash']]['indexes'].append( zfinfo['inv_index']-1 )
ret[zfinfo['zonefile_hash']]['block_heights'].append( zfinfo['block_height'] )
ret[zfinfo['zonefile_hash']]['popularity'] += popularity
ret[zfinfo['zonefile_hash']]['peers'] += peers
ret[zfinfo['zonefile_hash']]['tried_storage'] = zfinfo['tried_storage']
return ret |
def _getCharFont(self, font_files, code_point):
"""
Returns font files containing given code point.
"""
return_font_files = []
for font_file in font_files:
face = ft.Face(font_file)
if face.get_char_index(code_point):
return_font_files.append(font_file)
return return_font_files | Returns font files containing given code point. | Below is the the instruction that describes the task:
### Input:
Returns font files containing given code point.
### Response:
def _getCharFont(self, font_files, code_point):
"""
Returns font files containing given code point.
"""
return_font_files = []
for font_file in font_files:
face = ft.Face(font_file)
if face.get_char_index(code_point):
return_font_files.append(font_file)
return return_font_files |
def Zabransky_quasi_polynomial_integral(T, Tc, a1, a2, a3, a4, a5, a6):
r'''Calculates the integral of liquid heat capacity using the
quasi-polynomial model developed in [1]_.
Parameters
----------
T : float
Temperature [K]
a1-a6 : float
Coefficients
Returns
-------
H : float
Difference in enthalpy from 0 K, [J/mol]
Notes
-----
The analytical integral was derived with SymPy; it is a simple polynomial
plus some logarithms.
Examples
--------
>>> H2 = Zabransky_quasi_polynomial_integral(300, 591.79, -3.12743,
... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989)
>>> H1 = Zabransky_quasi_polynomial_integral(200, 591.79, -3.12743,
... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989)
>>> H2 - H1
14662.026406892925
References
----------
.. [1] Zabransky, M., V. Ruzicka Jr, V. Majer, and Eugene S. Domalski.
Heat Capacity of Liquids: Critical Review and Recommended Values.
2 Volume Set. Washington, D.C.: Amer Inst of Physics, 1996.
'''
Tc2 = Tc*Tc
Tc3 = Tc2*Tc
term = T - Tc
return R*(T*(T*(T*(T*a6/(4.*Tc3) + a5/(3.*Tc2)) + a4/(2.*Tc)) - a1 + a3)
+ T*a1*log(1. - T/Tc) - 0.5*Tc*(a1 + a2)*log(term*term)) | r'''Calculates the integral of liquid heat capacity using the
quasi-polynomial model developed in [1]_.
Parameters
----------
T : float
Temperature [K]
a1-a6 : float
Coefficients
Returns
-------
H : float
Difference in enthalpy from 0 K, [J/mol]
Notes
-----
The analytical integral was derived with SymPy; it is a simple polynomial
plus some logarithms.
Examples
--------
>>> H2 = Zabransky_quasi_polynomial_integral(300, 591.79, -3.12743,
... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989)
>>> H1 = Zabransky_quasi_polynomial_integral(200, 591.79, -3.12743,
... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989)
>>> H2 - H1
14662.026406892925
References
----------
.. [1] Zabransky, M., V. Ruzicka Jr, V. Majer, and Eugene S. Domalski.
Heat Capacity of Liquids: Critical Review and Recommended Values.
2 Volume Set. Washington, D.C.: Amer Inst of Physics, 1996. | Below is the the instruction that describes the task:
### Input:
r'''Calculates the integral of liquid heat capacity using the
quasi-polynomial model developed in [1]_.
Parameters
----------
T : float
Temperature [K]
a1-a6 : float
Coefficients
Returns
-------
H : float
Difference in enthalpy from 0 K, [J/mol]
Notes
-----
The analytical integral was derived with SymPy; it is a simple polynomial
plus some logarithms.
Examples
--------
>>> H2 = Zabransky_quasi_polynomial_integral(300, 591.79, -3.12743,
... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989)
>>> H1 = Zabransky_quasi_polynomial_integral(200, 591.79, -3.12743,
... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989)
>>> H2 - H1
14662.026406892925
References
----------
.. [1] Zabransky, M., V. Ruzicka Jr, V. Majer, and Eugene S. Domalski.
Heat Capacity of Liquids: Critical Review and Recommended Values.
2 Volume Set. Washington, D.C.: Amer Inst of Physics, 1996.
### Response:
def Zabransky_quasi_polynomial_integral(T, Tc, a1, a2, a3, a4, a5, a6):
r'''Calculates the integral of liquid heat capacity using the
quasi-polynomial model developed in [1]_.
Parameters
----------
T : float
Temperature [K]
a1-a6 : float
Coefficients
Returns
-------
H : float
Difference in enthalpy from 0 K, [J/mol]
Notes
-----
The analytical integral was derived with SymPy; it is a simple polynomial
plus some logarithms.
Examples
--------
>>> H2 = Zabransky_quasi_polynomial_integral(300, 591.79, -3.12743,
... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989)
>>> H1 = Zabransky_quasi_polynomial_integral(200, 591.79, -3.12743,
... 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989)
>>> H2 - H1
14662.026406892925
References
----------
.. [1] Zabransky, M., V. Ruzicka Jr, V. Majer, and Eugene S. Domalski.
Heat Capacity of Liquids: Critical Review and Recommended Values.
2 Volume Set. Washington, D.C.: Amer Inst of Physics, 1996.
'''
Tc2 = Tc*Tc
Tc3 = Tc2*Tc
term = T - Tc
return R*(T*(T*(T*(T*a6/(4.*Tc3) + a5/(3.*Tc2)) + a4/(2.*Tc)) - a1 + a3)
+ T*a1*log(1. - T/Tc) - 0.5*Tc*(a1 + a2)*log(term*term)) |
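The integrand implied by the function above is the Zabransky quasi-polynomial heat-capacity form. The sketch below writes that form out explicitly (an assumption, but consistent with differentiating the integral) and compares it to a central finite difference of the record's function; the value of R must match the module-level gas constant used above for the two results to agree.
# Illustrative consistency check: Cp(T) obtained by differentiating the integral above
from math import log

def Cp_quasi_polynomial(T, Tc, a1, a2, a3, a4, a5, a6, R=8.314472):
    Tr = T/Tc  # reduced temperature
    return R*(a1*log(1. - Tr) + a2/(1. - Tr) + a3 + a4*Tr + a5*Tr**2 + a6*Tr**3)

args = (591.79, -3.12743, 0.0857315, 13.7282, 1.28971, 6.42297, 4.10989)  # coefficients from the docstring example
T, dT = 300.0, 1e-3
dHdT = (Zabransky_quasi_polynomial_integral(T + dT, *args)
        - Zabransky_quasi_polynomial_integral(T - dT, *args))/(2.*dT)
print(dHdT, Cp_quasi_polynomial(T, *args))  # both ~157.6 J/(mol*K) if R matches the module constant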
def _open_fp(self, fp):
# type: (BinaryIO) -> None
'''
An internal method to open an existing ISO for inspection and
modification. Note that the file object passed in here must stay open
for the lifetime of this object, as the PyCdlib class uses it internally
to do writing and reading operations.
Parameters:
fp - The file object containing the ISO to open up.
Returns:
Nothing.
'''
if hasattr(fp, 'mode') and 'b' not in fp.mode:
raise pycdlibexception.PyCdlibInvalidInput("The file to open must be in binary mode (add 'b' to the open flags)")
self._cdfp = fp
# Get the Primary Volume Descriptor (pvd), the set of Supplementary
# Volume Descriptors (svds), the set of Volume Partition
# Descriptors (vpds), the set of Boot Records (brs), and the set of
# Volume Descriptor Set Terminators (vdsts)
self._parse_volume_descriptors()
old = self._cdfp.tell()
self._cdfp.seek(0)
tmp_mbr = isohybrid.IsoHybrid()
if tmp_mbr.parse(self._cdfp.read(512)):
# We only save the object if it turns out to be a valid IsoHybrid
self.isohybrid_mbr = tmp_mbr
self._cdfp.seek(old)
if self.pvd.application_use[141:149] == b'CD-XA001':
self.xa = True
for br in self.brs:
self._check_and_parse_eltorito(br)
# Now that we have the PVD, parse the Path Tables according to Ecma-119
# section 9.4. We want to ensure that the big endian versions agree
# with the little endian ones (to make sure it is a valid ISO).
# Little Endian first
le_ptrs, extent_to_ptr = self._parse_path_table(self.pvd.path_table_size(),
self.pvd.path_table_location_le)
# Big Endian next.
tmp_be_ptrs, e_unused = self._parse_path_table(self.pvd.path_table_size(),
self.pvd.path_table_location_be)
for index, ptr in enumerate(le_ptrs):
if not ptr.equal_to_be(tmp_be_ptrs[index]):
raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian path table records do not agree')
self.interchange_level = 1
for svd in self.svds:
if svd.version == 2 and svd.file_structure_version == 2:
self.interchange_level = 4
break
extent_to_inode = {} # type: Dict[int, inode.Inode]
# OK, so now that we have the PVD, we start at its root directory
# record and find all of the files
ic_level, lastbyte = self._walk_directories(self.pvd, extent_to_ptr,
extent_to_inode, le_ptrs)
self.interchange_level = max(self.interchange_level, ic_level)
# On El Torito ISOs, after we have walked the directories we look
# to see if all of the entries in El Torito have corresponding
# directory records. If they don't, then it may be the case that
# the El Torito bits of the system are 'hidden' or 'unlinked',
# meaning that they take up space but have no corresponding directory
# record in the ISO filesystem. In order to accommodate the rest
# of the system, which really expects these things to have directory
# records, we use fake directory records that don't get written out.
#
# Note that we specifically do *not* add these to any sort of parent;
# that way, we don't run afoul of any checks that adding a child to a
# parent might have. This means that if we do ever want to unhide this
# entry, we'll have to do some additional work to give it a real name
# and link it to the appropriate parent.
if self.eltorito_boot_catalog is not None:
self._link_eltorito(extent_to_inode)
# Now that everything has a dirrecord, see if we have a boot
# info table.
self._check_for_eltorito_boot_info_table(self.eltorito_boot_catalog.initial_entry.inode)
for sec in self.eltorito_boot_catalog.sections:
for entry in sec.section_entries:
self._check_for_eltorito_boot_info_table(entry.inode)
# The PVD is finished. Now look to see if we need to parse the SVD.
for svd in self.svds:
if (svd.flags & 0x1) == 0 and svd.escape_sequences[:3] in (b'%/@', b'%/C', b'%/E'):
if self.joliet_vd is not None:
raise pycdlibexception.PyCdlibInvalidISO('Only a single Joliet SVD is supported')
self.joliet_vd = svd
le_ptrs, joliet_extent_to_ptr = self._parse_path_table(svd.path_table_size(),
svd.path_table_location_le)
tmp_be_ptrs, j_unused = self._parse_path_table(svd.path_table_size(),
svd.path_table_location_be)
for index, ptr in enumerate(le_ptrs):
if not ptr.equal_to_be(tmp_be_ptrs[index]):
raise pycdlibexception.PyCdlibInvalidISO('Joliet little-endian and big-endian path table records do not agree')
self._walk_directories(svd, joliet_extent_to_ptr,
extent_to_inode, le_ptrs)
elif svd.version == 2 and svd.file_structure_version == 2:
if self.enhanced_vd is not None:
raise pycdlibexception.PyCdlibInvalidISO('Only a single enhanced VD is supported')
self.enhanced_vd = svd
# We've seen ISOs in the wild (Office XP) that have a PVD space size
# that is smaller than the location of the last directory record
# extent + length. If we see this, automatically update the size in the
# PVD (and any SVDs) so that subsequent operations will be correct.
log_block_size = self.pvd.logical_block_size()
if lastbyte > self.pvd.space_size * log_block_size:
new_pvd_size = utils.ceiling_div(lastbyte, log_block_size)
for pvd in self.pvds:
pvd.space_size = new_pvd_size
if self.joliet_vd is not None:
self.joliet_vd.space_size = new_pvd_size
if self.enhanced_vd is not None:
self.enhanced_vd.space_size = new_pvd_size
# Look to see if this is a UDF volume. It is one if we have a UDF BEA,
# UDF NSR, and UDF TEA, in which case we parse the UDF descriptors and
# walk the filesystem.
if self._has_udf:
self._parse_udf_descriptors()
self._walk_udf_directories(extent_to_inode)
# Now we look for the 'version' volume descriptor, common on ISOs made
# with genisoimage or mkisofs. This volume descriptor doesn't have any
# specification, but from code inspection, it is either a completely
# zero extent, or starts with 'MKI'. Further, it starts directly after
# the VDST, or directly after the UDF recognition sequence (if this is
# a UDF ISO). Thus, we go looking for it at those places, and add it
# if we find it there.
version_vd_extent = self.vdsts[0].extent_location() + 1
if self._has_udf:
version_vd_extent = self.udf_tea.extent_location() + 1
version_vd = headervd.VersionVolumeDescriptor()
self._cdfp.seek(version_vd_extent * log_block_size)
if version_vd.parse(self._cdfp.read(log_block_size), version_vd_extent):
self.version_vd = version_vd
self._initialized = True | An internal method to open an existing ISO for inspection and
modification. Note that the file object passed in here must stay open
for the lifetime of this object, as the PyCdlib class uses it internally
to do writing and reading operations.
Parameters:
fp - The file object containing the ISO to open up.
Returns:
Nothing. | Below is the the instruction that describes the task:
### Input:
An internal method to open an existing ISO for inspection and
modification. Note that the file object passed in here must stay open
for the lifetime of this object, as the PyCdlib class uses it internally
to do writing and reading operations.
Parameters:
fp - The file object containing the ISO to open up.
Returns:
Nothing.
### Response:
def _open_fp(self, fp):
# type: (BinaryIO) -> None
'''
An internal method to open an existing ISO for inspection and
modification. Note that the file object passed in here must stay open
for the lifetime of this object, as the PyCdlib class uses it internally
to do writing and reading operations.
Parameters:
fp - The file object containing the ISO to open up.
Returns:
Nothing.
'''
if hasattr(fp, 'mode') and 'b' not in fp.mode:
raise pycdlibexception.PyCdlibInvalidInput("The file to open must be in binary mode (add 'b' to the open flags)")
self._cdfp = fp
# Get the Primary Volume Descriptor (pvd), the set of Supplementary
# Volume Descriptors (svds), the set of Volume Partition
# Descriptors (vpds), the set of Boot Records (brs), and the set of
# Volume Descriptor Set Terminators (vdsts)
self._parse_volume_descriptors()
old = self._cdfp.tell()
self._cdfp.seek(0)
tmp_mbr = isohybrid.IsoHybrid()
if tmp_mbr.parse(self._cdfp.read(512)):
# We only save the object if it turns out to be a valid IsoHybrid
self.isohybrid_mbr = tmp_mbr
self._cdfp.seek(old)
if self.pvd.application_use[141:149] == b'CD-XA001':
self.xa = True
for br in self.brs:
self._check_and_parse_eltorito(br)
# Now that we have the PVD, parse the Path Tables according to Ecma-119
# section 9.4. We want to ensure that the big endian versions agree
# with the little endian ones (to make sure it is a valid ISO).
# Little Endian first
le_ptrs, extent_to_ptr = self._parse_path_table(self.pvd.path_table_size(),
self.pvd.path_table_location_le)
# Big Endian next.
tmp_be_ptrs, e_unused = self._parse_path_table(self.pvd.path_table_size(),
self.pvd.path_table_location_be)
for index, ptr in enumerate(le_ptrs):
if not ptr.equal_to_be(tmp_be_ptrs[index]):
raise pycdlibexception.PyCdlibInvalidISO('Little-endian and big-endian path table records do not agree')
self.interchange_level = 1
for svd in self.svds:
if svd.version == 2 and svd.file_structure_version == 2:
self.interchange_level = 4
break
extent_to_inode = {} # type: Dict[int, inode.Inode]
# OK, so now that we have the PVD, we start at its root directory
# record and find all of the files
ic_level, lastbyte = self._walk_directories(self.pvd, extent_to_ptr,
extent_to_inode, le_ptrs)
self.interchange_level = max(self.interchange_level, ic_level)
# On El Torito ISOs, after we have walked the directories we look
# to see if all of the entries in El Torito have corresponding
# directory records. If they don't, then it may be the case that
# the El Torito bits of the system are 'hidden' or 'unlinked',
# meaning that they take up space but have no corresponding directory
# record in the ISO filesystem. In order to accommodate the rest
# of the system, which really expects these things to have directory
# records, we use fake directory records that don't get written out.
#
# Note that we specifically do *not* add these to any sort of parent;
# that way, we don't run afoul of any checks that adding a child to a
# parent might have. This means that if we do ever want to unhide this
# entry, we'll have to do some additional work to give it a real name
# and link it to the appropriate parent.
if self.eltorito_boot_catalog is not None:
self._link_eltorito(extent_to_inode)
# Now that everything has a dirrecord, see if we have a boot
# info table.
self._check_for_eltorito_boot_info_table(self.eltorito_boot_catalog.initial_entry.inode)
for sec in self.eltorito_boot_catalog.sections:
for entry in sec.section_entries:
self._check_for_eltorito_boot_info_table(entry.inode)
# The PVD is finished. Now look to see if we need to parse the SVD.
for svd in self.svds:
if (svd.flags & 0x1) == 0 and svd.escape_sequences[:3] in (b'%/@', b'%/C', b'%/E'):
if self.joliet_vd is not None:
raise pycdlibexception.PyCdlibInvalidISO('Only a single Joliet SVD is supported')
self.joliet_vd = svd
le_ptrs, joliet_extent_to_ptr = self._parse_path_table(svd.path_table_size(),
svd.path_table_location_le)
tmp_be_ptrs, j_unused = self._parse_path_table(svd.path_table_size(),
svd.path_table_location_be)
for index, ptr in enumerate(le_ptrs):
if not ptr.equal_to_be(tmp_be_ptrs[index]):
raise pycdlibexception.PyCdlibInvalidISO('Joliet little-endian and big-endian path table records do not agree')
self._walk_directories(svd, joliet_extent_to_ptr,
extent_to_inode, le_ptrs)
elif svd.version == 2 and svd.file_structure_version == 2:
if self.enhanced_vd is not None:
raise pycdlibexception.PyCdlibInvalidISO('Only a single enhanced VD is supported')
self.enhanced_vd = svd
# We've seen ISOs in the wild (Office XP) that have a PVD space size
# that is smaller than the location of the last directory record
# extent + length. If we see this, automatically update the size in the
# PVD (and any SVDs) so that subsequent operations will be correct.
log_block_size = self.pvd.logical_block_size()
if lastbyte > self.pvd.space_size * log_block_size:
new_pvd_size = utils.ceiling_div(lastbyte, log_block_size)
for pvd in self.pvds:
pvd.space_size = new_pvd_size
if self.joliet_vd is not None:
self.joliet_vd.space_size = new_pvd_size
if self.enhanced_vd is not None:
self.enhanced_vd.space_size = new_pvd_size
# Look to see if this is a UDF volume. It is one if we have a UDF BEA,
# UDF NSR, and UDF TEA, in which case we parse the UDF descriptors and
# walk the filesystem.
if self._has_udf:
self._parse_udf_descriptors()
self._walk_udf_directories(extent_to_inode)
# Now we look for the 'version' volume descriptor, common on ISOs made
# with genisoimage or mkisofs. This volume descriptor doesn't have any
# specification, but from code inspection, it is either a completely
# zero extent, or starts with 'MKI'. Further, it starts directly after
# the VDST, or directly after the UDF recognition sequence (if this is
# a UDF ISO). Thus, we go looking for it at those places, and add it
# if we find it there.
version_vd_extent = self.vdsts[0].extent_location() + 1
if self._has_udf:
version_vd_extent = self.udf_tea.extent_location() + 1
version_vd = headervd.VersionVolumeDescriptor()
self._cdfp.seek(version_vd_extent * log_block_size)
if version_vd.parse(self._cdfp.read(log_block_size), version_vd_extent):
self.version_vd = version_vd
self._initialized = True |
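The method above is internal; a hedged sketch of the public calls that typically wrap it (class and method names assumed from the pycdlib package, not shown in this record). Note the docstring requirement that the file object stay open for the lifetime of the object.
# Illustrative (assumes the public PyCdlib API)
import pycdlib

iso = pycdlib.PyCdlib()
with open('example.iso', 'rb') as fp:  # must be opened in binary mode and kept open while iso is in use
    iso.open_fp(fp)
    # ... inspect or modify the ISO here ...
    iso.close()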
def SetDayOfWeekHasService(self, dow, has_service=True):
"""Set service as running (or not) on a day of the week. By default the
service does not run on any days.
Args:
dow: 0 for Monday through 6 for Sunday
has_service: True if this service operates on dow, False if it does not.
Returns:
None
"""
assert(dow >= 0 and dow < 7)
self.day_of_week[dow] = has_service | Set service as running (or not) on a day of the week. By default the
service does not run on any days.
Args:
dow: 0 for Monday through 6 for Sunday
has_service: True if this service operates on dow, False if it does not.
Returns:
None | Below is the the instruction that describes the task:
### Input:
Set service as running (or not) on a day of the week. By default the
service does not run on any days.
Args:
dow: 0 for Monday through 6 for Sunday
has_service: True if this service operates on dow, False if it does not.
Returns:
None
### Response:
def SetDayOfWeekHasService(self, dow, has_service=True):
"""Set service as running (or not) on a day of the week. By default the
service does not run on any days.
Args:
dow: 0 for Monday through 6 for Sunday
has_service: True if this service operates on dow, False if it does not.
Returns:
None
"""
assert(dow >= 0 and dow < 7)
self.day_of_week[dow] = has_service |
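A small hedged usage sketch; `period` stands for an instance of the class this method belongs to (a GTFS service-period object), which the record does not show.
# Hypothetical: weekday-only service on an existing service-period object
for dow in range(5):       # 0 = Monday .. 4 = Friday
    period.SetDayOfWeekHasService(dow, True)
for dow in (5, 6):         # Saturday, Sunday
    period.SetDayOfWeekHasService(dow, False)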
def future_check_request(self, name, update=None):
"""Check if the request exists.
Used internally by future_get_request. This method is aware of
synchronisation in progress and if inspection of the server is allowed.
Parameters
----------
name : str
Name of the request to verify.
update : bool or None, optional
If a katcp request to the server should be made to check if the
sensor is on the server. True = Allow, False do not Allow, None
use the class default.
Notes
-----
Ensure that self.state.data_synced == True if yielding to future_check_request
from a state-change callback, or a deadlock will occur.
"""
exist = False
yield self.until_data_synced()
if name in self._requests_index:
exist = True
else:
if update or (update is None and self._update_on_lookup):
yield self.inspect_requests(name)
exist = yield self.future_check_request(name, False)
raise tornado.gen.Return(exist) | Check if the request exists.
Used internally by future_get_request. This method is aware of
synchronisation in progress and if inspection of the server is allowed.
Parameters
----------
name : str
Name of the request to verify.
update : bool or None, optional
If a katcp request to the server should be made to check if the
sensor is on the server. True = Allow, False do not Allow, None
use the class default.
Notes
-----
Ensure that self.state.data_synced == True if yielding to future_check_request
from a state-change callback, or a deadlock will occur. | Below is the the instruction that describes the task:
### Input:
Check if the request exists.
Used internally by future_get_request. This method is aware of
synchronisation in progress and if inspection of the server is allowed.
Parameters
----------
name : str
Name of the request to verify.
update : bool or None, optional
If a katcp request to the server should be made to check if the
sensor is on the server. True = Allow, False do not Allow, None
use the class default.
Notes
-----
Ensure that self.state.data_synced == True if yielding to future_check_request
from a state-change callback, or a deadlock will occur.
### Response:
def future_check_request(self, name, update=None):
"""Check if the request exists.
Used internally by future_get_request. This method is aware of
synchronisation in progress and if inspection of the server is allowed.
Parameters
----------
name : str
Name of the request to verify.
update : bool or None, optional
If a katcp request to the server should be made to check if the
sensor is on the server. True = Allow, False do not Allow, None
use the class default.
Notes
-----
Ensure that self.state.data_synced == True if yielding to future_check_request
from a state-change callback, or a deadlock will occur.
"""
exist = False
yield self.until_data_synced()
if name in self._requests_index:
exist = True
else:
if update or (update is None and self._update_on_lookup):
yield self.inspect_requests(name)
exist = yield self.future_check_request(name, False)
raise tornado.gen.Return(exist) |
def client_has_user_consent(self):
"""
Check if already exists user consent for some client.
Return bool.
"""
value = False
try:
uc = UserConsent.objects.get(user=self.request.user, client=self.client)
if (set(self.params['scope']).issubset(uc.scope)) and not (uc.has_expired()):
value = True
except UserConsent.DoesNotExist:
pass
return value | Check if already exists user consent for some client.
Return bool. | Below is the the instruction that describes the task:
### Input:
Check if already exists user consent for some client.
Return bool.
### Response:
def client_has_user_consent(self):
"""
Check if already exists user consent for some client.
Return bool.
"""
value = False
try:
uc = UserConsent.objects.get(user=self.request.user, client=self.client)
if (set(self.params['scope']).issubset(uc.scope)) and not (uc.has_expired()):
value = True
except UserConsent.DoesNotExist:
pass
return value |
def note_on(self, channel, note, velocity):
"""Return bytes for a 'note_on' event."""
return self.midi_event(NOTE_ON, channel, note, velocity) | Return bytes for a 'note_on' event. | Below is the the instruction that describes the task:
### Input:
Return bytes for a 'note_on' event.
### Response:
def note_on(self, channel, note, velocity):
"""Return bytes for a 'note_on' event."""
return self.midi_event(NOTE_ON, channel, note, velocity) |
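For reference, the standard MIDI framing that a helper like `midi_event` would normally produce for this call; the record does not show `midi_event` or the `NOTE_ON` constant, so this is stated as an assumption about the underlying protocol rather than about this library.
# Standard MIDI note-on framing (illustrative, independent of the helper above):
# status byte 0x90 | channel, followed by the note and velocity data bytes
def note_on_bytes(channel, note, velocity):
    assert 0 <= channel <= 15 and 0 <= note <= 127 and 0 <= velocity <= 127
    return bytes([0x90 | channel, note, velocity])

print(note_on_bytes(0, 60, 100).hex())  # '903c64' -> note-on, middle C, velocity 100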
def joint(node):
"""Merge the bodies of primal and adjoint into a single function.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
Returns:
func: A `Module` node with a single function definition containing the
combined primal and adjoint.
"""
node, _, _ = _fix(node)
body = node.body[0].body[:-1] + node.body[1].body
func = gast.Module(body=[gast.FunctionDef(
name=node.body[0].name, args=node.body[1].args, body=body,
decorator_list=[], returns=None)])
# Clean up
anno.clearanno(func)
return func | Merge the bodies of primal and adjoint into a single function.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
Returns:
func: A `Module` node with a single function definition containing the
combined primal and adjoint. | Below is the the instruction that describes the task:
### Input:
Merge the bodies of primal and adjoint into a single function.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
Returns:
func: A `Module` node with a single function definition containing the
combined primal and adjoint.
### Response:
def joint(node):
"""Merge the bodies of primal and adjoint into a single function.
Args:
node: A module with the primal and adjoint function definitions as returned
by `reverse_ad`.
Returns:
func: A `Module` node with a single function definition containing the
combined primal and adjoint.
"""
node, _, _ = _fix(node)
body = node.body[0].body[:-1] + node.body[1].body
func = gast.Module(body=[gast.FunctionDef(
name=node.body[0].name, args=node.body[1].args, body=body,
decorator_list=[], returns=None)])
# Clean up
anno.clearanno(func)
return func |
def load_spitzer_catalog(show_progress=False): # pragma: no cover
"""
Load a 4.5 micron Spitzer catalog.
The image from which this catalog was derived is returned by
:func:`load_spitzer_image`.
Parameters
----------
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `False`).
Returns
-------
catalog : `~astropy.table.Table`
The catalog of sources.
See Also
--------
load_spitzer_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
catalog = datasets.load_spitzer_catalog()
plt.scatter(catalog['l'], catalog['b'])
plt.xlabel('Galactic l')
plt.ylabel('Galactic b')
plt.xlim(18.39, 18.05)
plt.ylim(0.13, 0.30)
"""
path = get_path('spitzer_example_catalog.xml', location='remote',
show_progress=show_progress)
table = Table.read(path)
return table | Load a 4.5 micron Spitzer catalog.
The image from which this catalog was derived is returned by
:func:`load_spitzer_image`.
Parameters
----------
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `False`).
Returns
-------
catalog : `~astropy.table.Table`
The catalog of sources.
See Also
--------
load_spitzer_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
catalog = datasets.load_spitzer_catalog()
plt.scatter(catalog['l'], catalog['b'])
plt.xlabel('Galactic l')
plt.ylabel('Galactic b')
plt.xlim(18.39, 18.05)
plt.ylim(0.13, 0.30) | Below is the the instruction that describes the task:
### Input:
Load a 4.5 micron Spitzer catalog.
The image from which this catalog was derived is returned by
:func:`load_spitzer_image`.
Parameters
----------
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `False`).
Returns
-------
catalog : `~astropy.table.Table`
The catalog of sources.
See Also
--------
load_spitzer_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
catalog = datasets.load_spitzer_catalog()
plt.scatter(catalog['l'], catalog['b'])
plt.xlabel('Galactic l')
plt.ylabel('Galactic b')
plt.xlim(18.39, 18.05)
plt.ylim(0.13, 0.30)
### Response:
def load_spitzer_catalog(show_progress=False): # pragma: no cover
"""
Load a 4.5 micron Spitzer catalog.
The image from which this catalog was derived is returned by
:func:`load_spitzer_image`.
Parameters
----------
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `False`).
Returns
-------
catalog : `~astropy.table.Table`
The catalog of sources.
See Also
--------
load_spitzer_image
Examples
--------
.. plot::
:include-source:
from photutils import datasets
catalog = datasets.load_spitzer_catalog()
plt.scatter(catalog['l'], catalog['b'])
plt.xlabel('Galactic l')
plt.ylabel('Galactic b')
plt.xlim(18.39, 18.05)
plt.ylim(0.13, 0.30)
"""
path = get_path('spitzer_example_catalog.xml', location='remote',
show_progress=show_progress)
table = Table.read(path)
return table |
def _num_samples(x):
"""Return number of samples in array-like x."""
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %r" % x)
return x.shape[0] if hasattr(x, 'shape') else len(x) | Return number of samples in array-like x. | Below is the the instruction that describes the task:
### Input:
Return number of samples in array-like x.
### Response:
def _num_samples(x):
"""Return number of samples in array-like x."""
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %r" % x)
return x.shape[0] if hasattr(x, 'shape') else len(x) |
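A quick illustration of the three branches above (plain sequence, array-like with a shape, and unsupported input):
# Illustrative calls
import numpy as np

print(_num_samples([1, 2, 3]))         # 3  (uses len)
print(_num_samples(np.zeros((5, 2))))  # 5  (uses .shape[0])
try:
    _num_samples(42)
except TypeError as exc:
    print(exc)                         # Expected sequence or array-like, got 42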
def handle_data(self, data):
"""Callback when the data of a tag has been collected."""
# Only process the data when we are in an active a tag and have an URL.
if not self.active_url:
return
# The visible text can have a final slash so strip it off
if data.strip('/') == self.active_url:
self.entries.append(self.active_url) | Callback when the data of a tag has been collected. | Below is the the instruction that describes the task:
### Input:
Callback when the data of a tag has been collected.
### Response:
def handle_data(self, data):
"""Callback when the data of a tag has been collected."""
# Only process the data when we are in an active a tag and have an URL.
if not self.active_url:
return
# The visible text can have a final slash so strip it off
if data.strip('/') == self.active_url:
self.entries.append(self.active_url) |
def gamma_centered(cls, kpts=(1, 1, 1), use_symmetries=True, use_time_reversal=True):
"""
Convenient static constructor for an automatic Gamma centered Kpoint grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
use_symmetries: False if spatial symmetries should not be used
to reduce the number of independent k-points.
use_time_reversal: False if time-reversal symmetry should not be used
to reduce the number of independent k-points.
Returns:
:class:`KSampling` object.
"""
return cls(kpts=[kpts], kpt_shifts=(0.0, 0.0, 0.0),
use_symmetries=use_symmetries, use_time_reversal=use_time_reversal,
comment="gamma-centered mode") | Convenient static constructor for an automatic Gamma centered Kpoint grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
use_symmetries: False if spatial symmetries should not be used
to reduce the number of independent k-points.
use_time_reversal: False if time-reversal symmetry should not be used
to reduce the number of independent k-points.
Returns:
:class:`KSampling` object. | Below is the the instruction that describes the task:
### Input:
Convenient static constructor for an automatic Gamma centered Kpoint grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
use_symmetries: False if spatial symmetries should not be used
to reduce the number of independent k-points.
use_time_reversal: False if time-reversal symmetry should not be used
to reduce the number of independent k-points.
Returns:
:class:`KSampling` object.
### Response:
def gamma_centered(cls, kpts=(1, 1, 1), use_symmetries=True, use_time_reversal=True):
"""
Convenient static constructor for an automatic Gamma centered Kpoint grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice vectors.
use_symmetries: False if spatial symmetries should not be used
to reduce the number of independent k-points.
use_time_reversal: False if time-reversal symmetry should not be used
to reduce the number of independent k-points.
Returns:
:class:`KSampling` object.
"""
return cls(kpts=[kpts], kpt_shifts=(0.0, 0.0, 0.0),
use_symmetries=use_symmetries, use_time_reversal=use_time_reversal,
comment="gamma-centered mode") |
def remove_origin(self, account_id, origin_id):
"""Removes an origin pull mapping with the given origin pull ID.
:param int account_id: the CDN account ID from which the mapping should
be deleted.
:param int origin_id: the origin pull mapping ID to delete.
"""
return self.account.deleteOriginPullRule(origin_id, id=account_id) | Removes an origin pull mapping with the given origin pull ID.
:param int account_id: the CDN account ID from which the mapping should
be deleted.
:param int origin_id: the origin pull mapping ID to delete. | Below is the the instruction that describes the task:
### Input:
Removes an origin pull mapping with the given origin pull ID.
:param int account_id: the CDN account ID from which the mapping should
be deleted.
:param int origin_id: the origin pull mapping ID to delete.
### Response:
def remove_origin(self, account_id, origin_id):
"""Removes an origin pull mapping with the given origin pull ID.
:param int account_id: the CDN account ID from which the mapping should
be deleted.
:param int origin_id: the origin pull mapping ID to delete.
"""
return self.account.deleteOriginPullRule(origin_id, id=account_id) |
def groups(self):
"""Set of groups defined in the roster.
:Return: the groups
:ReturnType: `set` of `unicode`
"""
groups = set()
for item in self._items:
groups |= item.groups
return groups | Set of groups defined in the roster.
:Return: the groups
:ReturnType: `set` of `unicode` | Below is the the instruction that describes the task:
### Input:
Set of groups defined in the roster.
:Return: the groups
:ReturnType: `set` of `unicode`
### Response:
def groups(self):
"""Set of groups defined in the roster.
:Return: the groups
:ReturnType: `set` of `unicode`
"""
groups = set()
for item in self._items:
groups |= item.groups
return groups |
def setupTable_cmap(self):
"""
Make the cmap table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "cmap" not in self.tables:
return
from fontTools.ttLib.tables._c_m_a_p import cmap_format_4
nonBMP = dict((k,v) for k,v in self.unicodeToGlyphNameMapping.items() if k > 65535)
if nonBMP:
mapping = dict((k,v) for k,v in self.unicodeToGlyphNameMapping.items() if k <= 65535)
else:
mapping = dict(self.unicodeToGlyphNameMapping)
# mac
cmap4_0_3 = cmap_format_4(4)
cmap4_0_3.platformID = 0
cmap4_0_3.platEncID = 3
cmap4_0_3.language = 0
cmap4_0_3.cmap = mapping
# windows
cmap4_3_1 = cmap_format_4(4)
cmap4_3_1.platformID = 3
cmap4_3_1.platEncID = 1
cmap4_3_1.language = 0
cmap4_3_1.cmap = mapping
# store
self.otf["cmap"] = cmap = newTable("cmap")
cmap.tableVersion = 0
cmap.tables = [cmap4_0_3, cmap4_3_1]
# If we have glyphs outside Unicode BMP, we must set another
# subtable that can hold longer codepoints for them.
if nonBMP:
from fontTools.ttLib.tables._c_m_a_p import cmap_format_12
nonBMP.update(mapping)
# mac
cmap12_0_4 = cmap_format_12(12)
cmap12_0_4.platformID = 0
cmap12_0_4.platEncID = 4
cmap12_0_4.language = 0
cmap12_0_4.cmap = nonBMP
# windows
cmap12_3_10 = cmap_format_12(12)
cmap12_3_10.platformID = 3
cmap12_3_10.platEncID = 10
cmap12_3_10.language = 0
cmap12_3_10.cmap = nonBMP
# update tables registry
cmap.tables = [cmap4_0_3, cmap4_3_1, cmap12_0_4, cmap12_3_10] | Make the cmap table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired. | Below is the the instruction that describes the task:
### Input:
Make the cmap table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
### Response:
def setupTable_cmap(self):
"""
Make the cmap table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "cmap" not in self.tables:
return
from fontTools.ttLib.tables._c_m_a_p import cmap_format_4
nonBMP = dict((k,v) for k,v in self.unicodeToGlyphNameMapping.items() if k > 65535)
if nonBMP:
mapping = dict((k,v) for k,v in self.unicodeToGlyphNameMapping.items() if k <= 65535)
else:
mapping = dict(self.unicodeToGlyphNameMapping)
# mac
cmap4_0_3 = cmap_format_4(4)
cmap4_0_3.platformID = 0
cmap4_0_3.platEncID = 3
cmap4_0_3.language = 0
cmap4_0_3.cmap = mapping
# windows
cmap4_3_1 = cmap_format_4(4)
cmap4_3_1.platformID = 3
cmap4_3_1.platEncID = 1
cmap4_3_1.language = 0
cmap4_3_1.cmap = mapping
# store
self.otf["cmap"] = cmap = newTable("cmap")
cmap.tableVersion = 0
cmap.tables = [cmap4_0_3, cmap4_3_1]
# If we have glyphs outside Unicode BMP, we must set another
# subtable that can hold longer codepoints for them.
if nonBMP:
from fontTools.ttLib.tables._c_m_a_p import cmap_format_12
nonBMP.update(mapping)
# mac
cmap12_0_4 = cmap_format_12(12)
cmap12_0_4.platformID = 0
cmap12_0_4.platEncID = 4
cmap12_0_4.language = 0
cmap12_0_4.cmap = nonBMP
# windows
cmap12_3_10 = cmap_format_12(12)
cmap12_3_10.platformID = 3
cmap12_3_10.platEncID = 10
cmap12_3_10.language = 0
cmap12_3_10.cmap = nonBMP
# update tables registry
cmap.tables = [cmap4_0_3, cmap4_3_1, cmap12_0_4, cmap12_3_10] |
def _cb_created(self, payload, duplicated):
"""Indirect callback (via Client) for point & subscription creation responses"""
if payload[P_RESOURCE] in _POINT_TYPE_TO_CLASS:
store = self.__new_feeds if payload[P_RESOURCE] == R_FEED else self.__new_controls
cls = _POINT_TYPE_TO_CLASS[payload[P_RESOURCE]]
with store:
store[payload[P_LID]] = cls(self._client, payload[P_ENTITY_LID], payload[P_LID], payload[P_ID])
logger.debug('Added %s: %s (for %s)', foc_to_str(payload[P_RESOURCE]), payload[P_LID],
payload[P_ENTITY_LID])
elif payload[P_RESOURCE] == R_SUB:
# local
if P_POINT_ENTITY_LID in payload:
key = (payload[P_POINT_TYPE], (payload[P_POINT_ENTITY_LID], payload[P_POINT_LID]))
# global
else:
key = (payload[P_POINT_TYPE], payload[P_POINT_ID])
new_subs = self.__new_subs
with new_subs:
if key in new_subs:
cls = RemoteFeed if payload[P_POINT_TYPE] == R_FEED else RemoteControl
new_subs[key] = cls(self._client, payload[P_ID], payload[P_POINT_ID], payload[P_ENTITY_LID])
else:
logger.warning('Ignoring subscription creation for unexpected %s: %s',
foc_to_str(payload[P_POINT_TYPE]), key[1])
else:
logger.error('Resource creation of type %d unhandled', payload[P_RESOURCE]) | Indirect callback (via Client) for point & subscription creation responses | Below is the the instruction that describes the task:
### Input:
Indirect callback (via Client) for point & subscription creation responses
### Response:
def _cb_created(self, payload, duplicated):
"""Indirect callback (via Client) for point & subscription creation responses"""
if payload[P_RESOURCE] in _POINT_TYPE_TO_CLASS:
store = self.__new_feeds if payload[P_RESOURCE] == R_FEED else self.__new_controls
cls = _POINT_TYPE_TO_CLASS[payload[P_RESOURCE]]
with store:
store[payload[P_LID]] = cls(self._client, payload[P_ENTITY_LID], payload[P_LID], payload[P_ID])
logger.debug('Added %s: %s (for %s)', foc_to_str(payload[P_RESOURCE]), payload[P_LID],
payload[P_ENTITY_LID])
elif payload[P_RESOURCE] == R_SUB:
# local
if P_POINT_ENTITY_LID in payload:
key = (payload[P_POINT_TYPE], (payload[P_POINT_ENTITY_LID], payload[P_POINT_LID]))
# global
else:
key = (payload[P_POINT_TYPE], payload[P_POINT_ID])
new_subs = self.__new_subs
with new_subs:
if key in new_subs:
cls = RemoteFeed if payload[P_POINT_TYPE] == R_FEED else RemoteControl
new_subs[key] = cls(self._client, payload[P_ID], payload[P_POINT_ID], payload[P_ENTITY_LID])
else:
logger.warning('Ignoring subscription creation for unexpected %s: %s',
foc_to_str(payload[P_POINT_TYPE]), key[1])
else:
logger.error('Resource creation of type %d unhandled', payload[P_RESOURCE]) |
def translate_addresstype(f):
"""decorator to translate the addressType field.
translate the value of the addressType field of the API response into a
translated type.
"""
@wraps(f)
def wr(r, pc):
at = r["addressType"]
try:
r.update({"addressType": POSTCODE_API_TYPEDEFS_ADDRESS_TYPES[at]})
except:
logger.warning("Warning: {}: "
"unknown 'addressType': {}".format(pc, at))
return f(r, pc)
return wr | decorator to translate the addressType field.
translate the value of the addressType field of the API response into a
translated type. | Below is the the instruction that describes the task:
### Input:
decorator to translate the addressType field.
translate the value of the addressType field of the API response into a
translated type.
### Response:
def translate_addresstype(f):
"""decorator to translate the addressType field.
translate the value of the addressType field of the API response into a
translated type.
"""
@wraps(f)
def wr(r, pc):
at = r["addressType"]
try:
r.update({"addressType": POSTCODE_API_TYPEDEFS_ADDRESS_TYPES[at]})
except:
logger.warning("Warning: {}: "
"unknown 'addressType': {}".format(pc, at))
return f(r, pc)
return wr |
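A hedged sketch of how the decorator above is applied; the handler name, the response dict, and the contents of POSTCODE_API_TYPEDEFS_ADDRESS_TYPES (and the module-level logger) are placeholders not shown in this record.
# Hypothetical usage of the decorator
@translate_addresstype
def handle_response(r, pc):
    # r['addressType'] has been translated here, if the raw value was a known key
    return r['addressType']

result = handle_response({'addressType': 'verblijfsobject'}, '1234AB')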
def update_task_redundancy(config, task_id, redundancy):
"""Update task redudancy for a project."""
if task_id is None:
msg = ("Are you sure you want to update all the tasks redundancy?")
if click.confirm(msg):
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res) | Update task redudancy for a project. | Below is the the instruction that describes the task:
### Input:
Update task redundancy for a project.
### Response:
def update_task_redundancy(config, task_id, redundancy):
"""Update task redudancy for a project."""
if task_id is None:
msg = ("Are you sure you want to update all the tasks redundancy?")
if click.confirm(msg):
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res)
else:
click.echo("Aborting.")
else:
res = _update_tasks_redundancy(config, task_id, redundancy)
click.echo(res) |
def update_col(self, column_name, series):
"""
Add or replace a column in the underlying DataFrame.
Parameters
----------
column_name : str
Column to add or replace.
series : pandas.Series or sequence
Column data.
"""
logger.debug('updating column {!r} in table {!r}'.format(
column_name, self.name))
self.local[column_name] = series | Add or replace a column in the underlying DataFrame.
Parameters
----------
column_name : str
Column to add or replace.
series : pandas.Series or sequence
Column data. | Below is the the instruction that describes the task:
### Input:
Add or replace a column in the underlying DataFrame.
Parameters
----------
column_name : str
Column to add or replace.
series : pandas.Series or sequence
Column data.
### Response:
def update_col(self, column_name, series):
"""
Add or replace a column in the underlying DataFrame.
Parameters
----------
column_name : str
Column to add or replace.
series : pandas.Series or sequence
Column data.
"""
logger.debug('updating column {!r} in table {!r}'.format(
column_name, self.name))
self.local[column_name] = series |
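A hedged usage sketch; `table` stands for an instance of the wrapper class this method belongs to (an orca/urbansim-style DataFrame wrapper), which is not shown here.
# Hypothetical usage: overwrite a column on a wrapped DataFrame
import pandas as pd

new_prices = pd.Series([210000, 355000, 182500], index=[101, 102, 103])
table.update_col('sale_price', new_prices)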
def appendpickle(table, source=None, protocol=-1, write_header=False):
"""
Append data to an existing pickle file. I.e.,
as :func:`petl.io.pickle.topickle` but the file is opened in append mode.
Note that no attempt is made to check that the fields or row lengths are
consistent with the existing data, the data rows from the table are simply
appended to the file.
"""
_writepickle(table, source=source, mode='ab', protocol=protocol,
write_header=write_header) | Append data to an existing pickle file. I.e.,
as :func:`petl.io.pickle.topickle` but the file is opened in append mode.
Note that no attempt is made to check that the fields or row lengths are
consistent with the existing data, the data rows from the table are simply
appended to the file. | Below is the the instruction that describes the task:
### Input:
Append data to an existing pickle file. I.e.,
as :func:`petl.io.pickle.topickle` but the file is opened in append mode.
Note that no attempt is made to check that the fields or row lengths are
consistent with the existing data, the data rows from the table are simply
appended to the file.
### Response:
def appendpickle(table, source=None, protocol=-1, write_header=False):
"""
Append data to an existing pickle file. I.e.,
as :func:`petl.io.pickle.topickle` but the file is opened in append mode.
Note that no attempt is made to check that the fields or row lengths are
consistent with the existing data, the data rows from the table are simply
appended to the file.
"""
_writepickle(table, source=source, mode='ab', protocol=protocol,
write_header=write_header) |
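A hedged round-trip sketch using the writer and reader conventions referenced in the docstring (petl-style row tables; the file name is a placeholder):
# Illustrative: write a pickle file, then append more rows to it
import petl as etl

t1 = [('foo', 'bar'), ('a', 1), ('b', 2)]
t2 = [('foo', 'bar'), ('c', 3)]
etl.topickle(t1, 'example.p')       # writes header + rows
etl.appendpickle(t2, 'example.p')   # appends rows only (no header by default)
print(list(etl.frompickle('example.p')))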
def show_feature_destibution(self, data = None):
"""!
@brief Shows feature distribution.
@details Only features in 1D, 2D, 3D space can be visualized.
@param[in] data (list): List of points that will be used for visualization, if it not specified than feature will be displayed only.
"""
visualizer = cluster_visualizer();
print("amount of nodes: ", self.__amount_nodes);
if (data is not None):
visualizer.append_cluster(data, marker = 'x');
for level in range(0, self.height):
level_nodes = self.get_level_nodes(level);
centers = [ node.feature.get_centroid() for node in level_nodes ];
visualizer.append_cluster(centers, None, markersize = (self.height - level + 1) * 5);
visualizer.show(); | !
@brief Shows feature distribution.
@details Only features in 1D, 2D, 3D space can be visualized.
@param[in] data (list): List of points that will be used for visualization, if it not specified than feature will be displayed only. | Below is the the instruction that describes the task:
### Input:
!
@brief Shows feature distribution.
@details Only features in 1D, 2D, 3D space can be visualized.
@param[in] data (list): List of points that will be used for visualization, if it not specified than feature will be displayed only.
### Response:
def show_feature_destibution(self, data = None):
"""!
@brief Shows feature distribution.
@details Only features in 1D, 2D, 3D space can be visualized.
@param[in] data (list): List of points that will be used for visualization, if it not specified than feature will be displayed only.
"""
visualizer = cluster_visualizer();
print("amount of nodes: ", self.__amount_nodes);
if (data is not None):
visualizer.append_cluster(data, marker = 'x');
for level in range(0, self.height):
level_nodes = self.get_level_nodes(level);
centers = [ node.feature.get_centroid() for node in level_nodes ];
visualizer.append_cluster(centers, None, markersize = (self.height - level + 1) * 5);
visualizer.show(); |
def lookup_endpoint(cli):
"""Looks up the application endpoint from dotcloud"""
url = '/applications/{0}/environment'.format(APPNAME)
environ = cli.user.get(url).item
port = environ['DOTCLOUD_SATELLITE_ZMQ_PORT']
host = socket.gethostbyname(environ['DOTCLOUD_SATELLITE_ZMQ_HOST'])
return "tcp://{0}:{1}".format(host, port) | Looks up the application endpoint from dotcloud | Below is the the instruction that describes the task:
### Input:
Looks up the application endpoint from dotcloud
### Response:
def lookup_endpoint(cli):
"""Looks up the application endpoint from dotcloud"""
url = '/applications/{0}/environment'.format(APPNAME)
environ = cli.user.get(url).item
port = environ['DOTCLOUD_SATELLITE_ZMQ_PORT']
host = socket.gethostbyname(environ['DOTCLOUD_SATELLITE_ZMQ_HOST'])
return "tcp://{0}:{1}".format(host, port) |
def Decompress(self, compressed_data):
"""Decompresses the compressed data.
Args:
compressed_data (bytes): compressed data.
Returns:
tuple(bytes, bytes): uncompressed data and remaining compressed data.
Raises:
BackEndError: if the zlib compressed stream cannot be decompressed.
"""
try:
uncompressed_data = self._zlib_decompressor.decompress(compressed_data)
remaining_compressed_data = getattr(
self._zlib_decompressor, 'unused_data', b'')
except zlib.error as exception:
raise errors.BackEndError((
'Unable to decompress zlib compressed stream with error: '
'{0!s}.').format(exception))
return uncompressed_data, remaining_compressed_data | Decompresses the compressed data.
Args:
compressed_data (bytes): compressed data.
Returns:
tuple(bytes, bytes): uncompressed data and remaining compressed data.
Raises:
BackEndError: if the zlib compressed stream cannot be decompressed. | Below is the the instruction that describes the task:
### Input:
Decompresses the compressed data.
Args:
compressed_data (bytes): compressed data.
Returns:
tuple(bytes, bytes): uncompressed data and remaining compressed data.
Raises:
BackEndError: if the zlib compressed stream cannot be decompressed.
### Response:
def Decompress(self, compressed_data):
"""Decompresses the compressed data.
Args:
compressed_data (bytes): compressed data.
Returns:
tuple(bytes, bytes): uncompressed data and remaining compressed data.
Raises:
BackEndError: if the zlib compressed stream cannot be decompressed.
"""
try:
uncompressed_data = self._zlib_decompressor.decompress(compressed_data)
remaining_compressed_data = getattr(
self._zlib_decompressor, 'unused_data', b'')
except zlib.error as exception:
raise errors.BackEndError((
'Unable to decompress zlib compressed stream with error: '
'{0!s}.').format(exception))
return uncompressed_data, remaining_compressed_data |
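For context, the behaviour this method relies on - bytes past the end of the zlib stream surfacing in unused_data - can be reproduced with the standard library alone; the payload below is illustrative:
import zlib
payload = zlib.compress(b'hello world') + b'trailing bytes'
decompressor = zlib.decompressobj()
data = decompressor.decompress(payload)   # b'hello world'
remaining = decompressor.unused_data      # b'trailing bytes', i.e. the remaining compressed data
print(data, remaining)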
def add_book_series(self, title, volume=None):
"""
:param volume: the volume of the book
:type volume: string
:param title: the title of the book
:type title: string
"""
book_series = {}
if title is not None:
book_series['title'] = title
if volume is not None:
book_series['volume'] = volume
self._append_to('book_series', book_series) | :param volume: the volume of the book
:type volume: string
:param title: the title of the book
:type title: string | Below is the the instruction that describes the task:
### Input:
:param volume: the volume of the book
:type volume: string
:param title: the title of the book
:type title: string
### Response:
def add_book_series(self, title, volume=None):
"""
:param volume: the volume of the book
:type volume: string
:param title: the title of the book
:type title: string
"""
book_series = {}
if title is not None:
book_series['title'] = title
if volume is not None:
book_series['volume'] = volume
self._append_to('book_series', book_series) |
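A short usage sketch; builder stands in for an instance of the record-builder class this method belongs to, and the series values are illustrative:
builder.add_book_series(title='Lecture Notes in Physics', volume='42')
builder.add_book_series(title='Springer Tracts in Modern Physics')  # volume may be omitted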
def parse_complex(tree_to_parse, xpath_root, xpath_map, complex_key):
"""
Creates and returns a Dictionary data structure parsed from the metadata.
:param tree_to_parse: the XML tree compatible with element_utils to be parsed
:param xpath_root: the XPATH location of the structure inside the parent element
:param xpath_map: a dict of XPATHs corresponding to a complex definition
:param complex_key: indicates which complex definition describes the structure
"""
complex_struct = {}
for prop in _complex_definitions.get(complex_key, xpath_map):
# Normalize complex values: treat values with newlines like values from separate elements
parsed = parse_property(tree_to_parse, xpath_root, xpath_map, prop)
parsed = reduce_value(flatten_items(v.split(_COMPLEX_DELIM) for v in wrap_value(parsed)))
complex_struct[prop] = get_default_for_complex_sub(complex_key, prop, parsed, xpath_map[prop])
return complex_struct if any(complex_struct.values()) else {} | Creates and returns a Dictionary data structure parsed from the metadata.
:param tree_to_parse: the XML tree compatible with element_utils to be parsed
:param xpath_root: the XPATH location of the structure inside the parent element
:param xpath_map: a dict of XPATHs corresponding to a complex definition
:param complex_key: indicates which complex definition describes the structure | Below is the the instruction that describes the task:
### Input:
Creates and returns a Dictionary data structure parsed from the metadata.
:param tree_to_parse: the XML tree compatible with element_utils to be parsed
:param xpath_root: the XPATH location of the structure inside the parent element
:param xpath_map: a dict of XPATHs corresponding to a complex definition
:param complex_key: indicates which complex definition describes the structure
### Response:
def parse_complex(tree_to_parse, xpath_root, xpath_map, complex_key):
"""
Creates and returns a Dictionary data structure parsed from the metadata.
:param tree_to_parse: the XML tree compatible with element_utils to be parsed
:param xpath_root: the XPATH location of the structure inside the parent element
:param xpath_map: a dict of XPATHs corresponding to a complex definition
:param complex_key: indicates which complex definition describes the structure
"""
complex_struct = {}
for prop in _complex_definitions.get(complex_key, xpath_map):
# Normalize complex values: treat values with newlines like values from separate elements
parsed = parse_property(tree_to_parse, xpath_root, xpath_map, prop)
parsed = reduce_value(flatten_items(v.split(_COMPLEX_DELIM) for v in wrap_value(parsed)))
complex_struct[prop] = get_default_for_complex_sub(complex_key, prop, parsed, xpath_map[prop])
return complex_struct if any(complex_struct.values()) else {} |
def _process_panel_configuration(self, config):
"""Add, remove and set default panels on the dashboard."""
try:
dashboard = config.get('PANEL_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_DASHBOARD defined.", config.__name__)
return
panel_slug = config.get('PANEL')
dashboard_cls = self.get_dashboard(dashboard)
panel_group = config.get('PANEL_GROUP')
default_panel = config.get('DEFAULT_PANEL')
# Set the default panel
if default_panel:
dashboard_cls.default_panel = default_panel
# Remove the panel
if config.get('REMOVE_PANEL', False):
for panel in dashboard_cls.get_panels():
if panel_slug == panel.slug:
dashboard_cls.unregister(panel.__class__)
elif config.get('ADD_PANEL', None):
# Add the panel to the dashboard
panel_path = config['ADD_PANEL']
mod_path, panel_cls = panel_path.rsplit(".", 1)
try:
mod = import_module(mod_path)
except ImportError as e:
LOG.warning("Could not import panel module %(module)s: "
"%(exc)s", {'module': mod_path, 'exc': e})
return
panel = getattr(mod, panel_cls)
                # test if the can_register method is present and call the method
                # if it is, to determine whether the panel should be loaded
if hasattr(panel, 'can_register') and \
callable(getattr(panel, 'can_register')):
if not panel.can_register():
LOG.debug("Load condition failed for panel: %(panel)s",
{'panel': panel_slug})
return
dashboard_cls.register(panel)
if panel_group:
dashboard_cls.get_panel_group(panel_group).\
panels.append(panel.slug)
else:
panels = list(dashboard_cls.panels)
panels.append(panel)
dashboard_cls.panels = tuple(panels)
except Exception as e:
LOG.warning('Could not process panel %(panel)s: %(exc)s',
{'panel': panel_slug, 'exc': e}) | Add, remove and set default panels on the dashboard. | Below is the the instruction that describes the task:
### Input:
Add, remove and set default panels on the dashboard.
### Response:
def _process_panel_configuration(self, config):
"""Add, remove and set default panels on the dashboard."""
try:
dashboard = config.get('PANEL_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_DASHBOARD defined.", config.__name__)
return
panel_slug = config.get('PANEL')
dashboard_cls = self.get_dashboard(dashboard)
panel_group = config.get('PANEL_GROUP')
default_panel = config.get('DEFAULT_PANEL')
# Set the default panel
if default_panel:
dashboard_cls.default_panel = default_panel
# Remove the panel
if config.get('REMOVE_PANEL', False):
for panel in dashboard_cls.get_panels():
if panel_slug == panel.slug:
dashboard_cls.unregister(panel.__class__)
elif config.get('ADD_PANEL', None):
# Add the panel to the dashboard
panel_path = config['ADD_PANEL']
mod_path, panel_cls = panel_path.rsplit(".", 1)
try:
mod = import_module(mod_path)
except ImportError as e:
LOG.warning("Could not import panel module %(module)s: "
"%(exc)s", {'module': mod_path, 'exc': e})
return
panel = getattr(mod, panel_cls)
                # test if the can_register method is present and call the method
                # if it is, to determine whether the panel should be loaded
if hasattr(panel, 'can_register') and \
callable(getattr(panel, 'can_register')):
if not panel.can_register():
LOG.debug("Load condition failed for panel: %(panel)s",
{'panel': panel_slug})
return
dashboard_cls.register(panel)
if panel_group:
dashboard_cls.get_panel_group(panel_group).\
panels.append(panel.slug)
else:
panels = list(dashboard_cls.panels)
panels.append(panel)
dashboard_cls.panels = tuple(panels)
except Exception as e:
LOG.warning('Could not process panel %(panel)s: %(exc)s',
{'panel': panel_slug, 'exc': e}) |
def updateData(self, data):
""" Updates the data used by the renderer.
"""
# pylab.ion()
fig = pylab.figure(1)
n_agent = len(data)
idx = 1
for i, adata in enumerate(data):
saxis = fig.add_subplot(3, n_agent, i + 1)
saxis.plot(adata[0])
idx += 1
aaxis = fig.add_subplot(3, n_agent, i + 1 + n_agent)
aaxis.plot(adata[1])
idx += 1
raxis = fig.add_subplot(3, n_agent, i + 1 + (n_agent * 2))
raxis.plot(adata[2])
idx += 1
pylab.show() | Updates the data used by the renderer. | Below is the the instruction that describes the task:
### Input:
Updates the data used by the renderer.
### Response:
def updateData(self, data):
""" Updates the data used by the renderer.
"""
# pylab.ion()
fig = pylab.figure(1)
n_agent = len(data)
idx = 1
for i, adata in enumerate(data):
saxis = fig.add_subplot(3, n_agent, i + 1)
saxis.plot(adata[0])
idx += 1
aaxis = fig.add_subplot(3, n_agent, i + 1 + n_agent)
aaxis.plot(adata[1])
idx += 1
raxis = fig.add_subplot(3, n_agent, i + 1 + (n_agent * 2))
raxis.plot(adata[2])
idx += 1
pylab.show() |
def is_prefix(cls, path):
"""
Check if a path is a valid prefix
Args:
path(str): path to be checked
Returns:
bool: True if the given path is a prefix
"""
lagofile = paths.Paths(path).prefix_lagofile()
return os.path.isfile(lagofile) | Check if a path is a valid prefix
Args:
path(str): path to be checked
Returns:
bool: True if the given path is a prefix | Below is the the instruction that describes the task:
### Input:
Check if a path is a valid prefix
Args:
path(str): path to be checked
Returns:
bool: True if the given path is a prefix
### Response:
def is_prefix(cls, path):
"""
Check if a path is a valid prefix
Args:
path(str): path to be checked
Returns:
bool: True if the given path is a prefix
"""
lagofile = paths.Paths(path).prefix_lagofile()
return os.path.isfile(lagofile) |
def predict(self, h=5):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
Returns
----------
- pd.DataFrame with predicted values
"""
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
predictions, _, _, _ = self._construct_predict(self.latent_variables.get_z_values(),h)
predictions = predictions*self._norm_std + self._norm_mean
date_index = self.shift_dates(h)
result = pd.DataFrame(predictions)
result.rename(columns={0:self.data_name}, inplace=True)
result.index = date_index[-h:]
return result | Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
Returns
----------
- pd.DataFrame with predicted values | Below is the the instruction that describes the task:
### Input:
Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
Returns
----------
- pd.DataFrame with predicted values
### Response:
def predict(self, h=5):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
Returns
----------
- pd.DataFrame with predicted values
"""
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
predictions, _, _, _ = self._construct_predict(self.latent_variables.get_z_values(),h)
predictions = predictions*self._norm_std + self._norm_mean
date_index = self.shift_dates(h)
result = pd.DataFrame(predictions)
result.rename(columns={0:self.data_name}, inplace=True)
result.index = date_index[-h:]
return result |
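A hedged sketch of the fit/predict flow around this method; the model class name, its constructor arguments and the sample data are assumptions made only for illustration:
import numpy as np
import pandas as pd
data = pd.DataFrame({'y': np.cumsum(np.random.randn(200))},
                    index=pd.date_range('2020-01-01', periods=200))
model = SomeTimeSeriesModel(data=data, target='y')  # hypothetical model class exposing predict()
model.fit()                                         # estimates the latent variables first
forecast = model.predict(h=5)                       # DataFrame indexed by the next 5 dates
print(forecast)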
def callback(self, request, **kwargs):
"""
    Called from the Service when the user agrees to activate it
"""
try:
client = self.get_evernote_client()
# finally we save the user auth token
# As we already stored the object ServicesActivated
# from the UserServiceCreateView now we update the same
# object to the database so :
# 1) we get the previous object
us = UserService.objects.get(user=request.user, name=ServicesActivated.objects.get(name='ServiceEvernote'))
# 2) then get the token
us.token = client.get_access_token(request.session['oauth_token'], request.session['oauth_token_secret'],
request.GET.get('oauth_verifier', ''))
# 3) and save everything
us.save()
except KeyError:
return '/'
    return 'evernote/callback.html' | Called from the Service when the user agrees to activate it | Below is the the instruction that describes the task:
### Input:
Called from the Service when the user agrees to activate it
### Response:
def callback(self, request, **kwargs):
"""
    Called from the Service when the user agrees to activate it
"""
try:
client = self.get_evernote_client()
# finally we save the user auth token
# As we already stored the object ServicesActivated
# from the UserServiceCreateView now we update the same
# object to the database so :
# 1) we get the previous object
us = UserService.objects.get(user=request.user, name=ServicesActivated.objects.get(name='ServiceEvernote'))
# 2) then get the token
us.token = client.get_access_token(request.session['oauth_token'], request.session['oauth_token_secret'],
request.GET.get('oauth_verifier', ''))
# 3) and save everything
us.save()
except KeyError:
return '/'
return 'evernote/callback.html' |
def download_data(self, configuration, output_file):
"""
        Executes the query specified in the configuration and returns the file for download
        :param configuration: Query configuration
        :param output_file: Path where the file should be downloaded
        :return:
"""
params = configuration
response = self.__app.native_api_call('metaql', 'download-data', params, self.__options, False, None, True, http_path="/api/v1/meta/")
with open(output_file, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
        del response | Executes the query specified in the configuration and returns the file for download
        :param configuration: Query configuration
        :param output_file: Path where the file should be downloaded
        :return: | Below is the the instruction that describes the task:
### Input:
Executes the query specified in the configuration and returns the file for download
        :param configuration: Query configuration
        :param output_file: Path where the file should be downloaded
        :return:
### Response:
def download_data(self, configuration, output_file):
"""
        Executes the query specified in the configuration and returns the file for download
        :param configuration: Query configuration
        :param output_file: Path where the file should be downloaded
        :return:
"""
params = configuration
response = self.__app.native_api_call('metaql', 'download-data', params, self.__options, False, None, True, http_path="/api/v1/meta/")
with open(output_file, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response |
def make_app():
"""Helper function that creates a plnt app."""
from plnt import Plnt
database_uri = os.environ.get("PLNT_DATABASE_URI")
app = Plnt(database_uri or "sqlite:////tmp/plnt.db")
app.bind_to_context()
return app | Helper function that creates a plnt app. | Below is the the instruction that describes the task:
### Input:
Helper function that creates a plnt app.
### Response:
def make_app():
"""Helper function that creates a plnt app."""
from plnt import Plnt
database_uri = os.environ.get("PLNT_DATABASE_URI")
app = Plnt(database_uri or "sqlite:////tmp/plnt.db")
app.bind_to_context()
return app |
def with_env(self, **environment_variables):
"""
Return new Command object that will be run with additional
environment variables.
Specify environment variables as follows:
new_cmd = old_cmd.with_env(PYTHON_PATH=".", ENV_PORT="2022")
"""
new_env_vars = {
str(var): str(val) for var, val in environment_variables.items()
}
new_command = copy.deepcopy(self)
new_command._env.update(new_env_vars)
return new_command | Return new Command object that will be run with additional
environment variables.
Specify environment variables as follows:
new_cmd = old_cmd.with_env(PYTHON_PATH=".", ENV_PORT="2022") | Below is the the instruction that describes the task:
### Input:
Return new Command object that will be run with additional
environment variables.
Specify environment variables as follows:
new_cmd = old_cmd.with_env(PYTHON_PATH=".", ENV_PORT="2022")
### Response:
def with_env(self, **environment_variables):
"""
Return new Command object that will be run with additional
environment variables.
Specify environment variables as follows:
new_cmd = old_cmd.with_env(PYTHON_PATH=".", ENV_PORT="2022")
"""
new_env_vars = {
str(var): str(val) for var, val in environment_variables.items()
}
new_command = copy.deepcopy(self)
new_command._env.update(new_env_vars)
return new_command |
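A small sketch of the copy-on-write behaviour; the Command('pytest') constructor call and the empty initial _env are assumptions for illustration:
base_cmd = Command('pytest')  # hypothetical way of building a Command
ci_cmd = base_cmd.with_env(PYTHON_PATH=".", ENV_PORT="2022")
# the original command is left untouched; only the deep copy carries the new variables
assert "ENV_PORT" not in base_cmd._env
assert ci_cmd._env["ENV_PORT"] == "2022"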
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
java = SubprocessExecutor(self._distribution)
subproc = java.spawn(classpath=classpath,
main='com.martiansoftware.nailgun.NGServer',
jvm_options=jvm_options,
args=[':0'],
stdin=safe_open('/dev/null', 'r'),
stdout=safe_open(self._ng_stdout, 'w'),
stderr=safe_open(self._ng_stderr, 'w'),
close_fds=True)
self.write_pid(subproc.pid) | Post-fork() child callback for ProcessManager.daemon_spawn(). | Below is the the instruction that describes the task:
### Input:
Post-fork() child callback for ProcessManager.daemon_spawn().
### Response:
def post_fork_child(self, fingerprint, jvm_options, classpath, stdout, stderr):
"""Post-fork() child callback for ProcessManager.daemon_spawn()."""
java = SubprocessExecutor(self._distribution)
subproc = java.spawn(classpath=classpath,
main='com.martiansoftware.nailgun.NGServer',
jvm_options=jvm_options,
args=[':0'],
stdin=safe_open('/dev/null', 'r'),
stdout=safe_open(self._ng_stdout, 'w'),
stderr=safe_open(self._ng_stderr, 'w'),
close_fds=True)
self.write_pid(subproc.pid) |
def insert(self, key, item):
"""
Insert item into hash table with specified key and item.
If key is already present returns -1 and leaves existing item unchanged
Returns 0 on success.
"""
return lib.zhashx_insert(self._as_parameter_, key, item) | Insert item into hash table with specified key and item.
If key is already present returns -1 and leaves existing item unchanged
Returns 0 on success. | Below is the the instruction that describes the task:
### Input:
Insert item into hash table with specified key and item.
If key is already present returns -1 and leaves existing item unchanged
Returns 0 on success.
### Response:
def insert(self, key, item):
"""
Insert item into hash table with specified key and item.
If key is already present returns -1 and leaves existing item unchanged
Returns 0 on success.
"""
return lib.zhashx_insert(self._as_parameter_, key, item) |
def check_path(self, path):
"""
turns path into an absolute path and checks that it exists, then
returns it as a string.
"""
path = os.path.abspath(path)
if os.path.exists(path):
return path
else:
utils.die("input file does not exists:\n {}".format(path)) | turns path into an absolute path and checks that it exists, then
returns it as a string. | Below is the the instruction that describes the task:
### Input:
turns path into an absolute path and checks that it exists, then
returns it as a string.
### Response:
def check_path(self, path):
"""
turns path into an absolute path and checks that it exists, then
returns it as a string.
"""
path = os.path.abspath(path)
if os.path.exists(path):
return path
else:
utils.die("input file does not exists:\n {}".format(path)) |
def generate(self, more_content=None, all_members=False):
# type: (Any, str, bool, bool) -> None
"""Generate reST for the object given by *self.name*, and possibly for
its members.
If *more_content* is given, include that content.
If *all_members* is True, document all members.
"""
directive = getattr(self, 'directivetype', self.objtype)
# parse components out of name
(file, _, namepath) = self.name.rpartition(':')
(contract_name, _, fullname) = namepath.partition('.')
(name, _, paramtypes) = fullname.partition('(')
# normalize components
name = name.strip() or None
if directive in ('contract', 'interface', 'library') and name is None:
name = contract_name
contract_name = None
paramtypes = ','.join(ptype.strip() for ptype in paramtypes.split(','))
paramtypes = re.sub(r'\s+', ' ', paramtypes)
if paramtypes.endswith(')'):
paramtypes = paramtypes[:-1]
# build query
expressions = [
SolidityObject.objtype == directive,
SolidityObject.name == name,
]
if file:
expressions.append(SolidityObject.file == file)
if contract_name:
expressions.append(SolidityObject.contract_name == contract_name)
if paramtypes:
expressions.append(SolidityObject.paramtypes == paramtypes)
# get associated object
query = SolidityObject.select().where(*expressions)
sol_objects = tuple(query)
if len(sol_objects) == 0:
logger.warning('{} {} could not be found via query:\n{}'.format(
directive, self.name, ',\n'.join(
' ' + str(expr.lhs.column_name) +
str(expr.op) + ('' if expr.rhs is None else expr.rhs)
for expr in expressions
)))
return
elif len(sol_objects) > 1:
logger.warning('multiple candidates for {} {} found:\n{}'.format(
directive, self.name,
'\n'.join(' ' + obj.signature for obj in sol_objects)))
self.object = sol_objects[0]
# begin rendering output
sourcename = self.get_sourcename()
# make sure that the result starts with an empty line. This is
# necessary for some situations where another directive preprocesses
# reST and no starting newline is present
self.add_line('', sourcename)
# generate the directive header and options, if applicable
self.add_directive_header()
# make sure content is indented
# TODO: consider adding a source unit directive
self.indent += self.content_indent
# add all content (from docstrings, attribute docs etc.)
self.add_content(more_content)
# document members, if possible
if directive in ('contract', 'interface', 'library'):
self.add_line('', sourcename)
self.document_members(all_members) | Generate reST for the object given by *self.name*, and possibly for
its members.
If *more_content* is given, include that content.
If *all_members* is True, document all members. | Below is the the instruction that describes the task:
### Input:
Generate reST for the object given by *self.name*, and possibly for
its members.
If *more_content* is given, include that content.
If *all_members* is True, document all members.
### Response:
def generate(self, more_content=None, all_members=False):
# type: (Any, str, bool, bool) -> None
"""Generate reST for the object given by *self.name*, and possibly for
its members.
If *more_content* is given, include that content.
If *all_members* is True, document all members.
"""
directive = getattr(self, 'directivetype', self.objtype)
# parse components out of name
(file, _, namepath) = self.name.rpartition(':')
(contract_name, _, fullname) = namepath.partition('.')
(name, _, paramtypes) = fullname.partition('(')
# normalize components
name = name.strip() or None
if directive in ('contract', 'interface', 'library') and name is None:
name = contract_name
contract_name = None
paramtypes = ','.join(ptype.strip() for ptype in paramtypes.split(','))
paramtypes = re.sub(r'\s+', ' ', paramtypes)
if paramtypes.endswith(')'):
paramtypes = paramtypes[:-1]
# build query
expressions = [
SolidityObject.objtype == directive,
SolidityObject.name == name,
]
if file:
expressions.append(SolidityObject.file == file)
if contract_name:
expressions.append(SolidityObject.contract_name == contract_name)
if paramtypes:
expressions.append(SolidityObject.paramtypes == paramtypes)
# get associated object
query = SolidityObject.select().where(*expressions)
sol_objects = tuple(query)
if len(sol_objects) == 0:
logger.warning('{} {} could not be found via query:\n{}'.format(
directive, self.name, ',\n'.join(
' ' + str(expr.lhs.column_name) +
str(expr.op) + ('' if expr.rhs is None else expr.rhs)
for expr in expressions
)))
return
elif len(sol_objects) > 1:
logger.warning('multiple candidates for {} {} found:\n{}'.format(
directive, self.name,
'\n'.join(' ' + obj.signature for obj in sol_objects)))
self.object = sol_objects[0]
# begin rendering output
sourcename = self.get_sourcename()
# make sure that the result starts with an empty line. This is
# necessary for some situations where another directive preprocesses
# reST and no starting newline is present
self.add_line('', sourcename)
# generate the directive header and options, if applicable
self.add_directive_header()
# make sure content is indented
# TODO: consider adding a source unit directive
self.indent += self.content_indent
# add all content (from docstrings, attribute docs etc.)
self.add_content(more_content)
# document members, if possible
if directive in ('contract', 'interface', 'library'):
self.add_line('', sourcename)
self.document_members(all_members) |
def rm_docs(self):
"""Remove converted docs."""
for filename in self.created:
if os.path.exists(filename):
os.unlink(filename) | Remove converted docs. | Below is the the instruction that describes the task:
### Input:
Remove converted docs.
### Response:
def rm_docs(self):
"""Remove converted docs."""
for filename in self.created:
if os.path.exists(filename):
os.unlink(filename) |
def from_string(cls, cl_function, dependencies=()):
"""Parse the given CL function into a SimpleCLFunction object.
Args:
cl_function (str): the function we wish to turn into an object
dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on
Returns:
SimpleCLFunction: the CL data type for this parameter declaration
"""
return_type, function_name, parameter_list, body = split_cl_function(cl_function)
return SimpleCLFunction(return_type, function_name, parameter_list, body, dependencies=dependencies) | Parse the given CL function into a SimpleCLFunction object.
Args:
cl_function (str): the function we wish to turn into an object
dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on
Returns:
SimpleCLFunction: the CL data type for this parameter declaration | Below is the the instruction that describes the task:
### Input:
Parse the given CL function into a SimpleCLFunction object.
Args:
cl_function (str): the function we wish to turn into an object
dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on
Returns:
SimpleCLFunction: the CL data type for this parameter declaration
### Response:
def from_string(cls, cl_function, dependencies=()):
"""Parse the given CL function into a SimpleCLFunction object.
Args:
cl_function (str): the function we wish to turn into an object
dependencies (list or tuple of CLLibrary): The list of CL libraries this function depends on
Returns:
SimpleCLFunction: the CL data type for this parameter declaration
"""
return_type, function_name, parameter_list, body = split_cl_function(cl_function)
return SimpleCLFunction(return_type, function_name, parameter_list, body, dependencies=dependencies) |
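A usage sketch for the classmethod above; the kernel source string is illustrative:
add_func = SimpleCLFunction.from_string('''
    double my_add(double a, double b){
        return a + b;
    }
''')
# the string is split into return type 'double', name 'my_add',
# the (a, b) parameter list and the function body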
def copy_result(self, selection):
"""Returns result
If selection consists of one cell only and result is a bitmap then
the bitmap is returned.
Otherwise the method returns string representations of the result
for the given selection in a tab separated string.
"""
bbox = selection.get_bbox()
if not bbox:
# There is no selection
bb_top, bb_left = self.grid.actions.cursor[:2]
bb_bottom, bb_right = bb_top, bb_left
else:
        # There is a selection
(bb_top, bb_left), (bb_bottom, bb_right) = bbox
if bb_top == bb_bottom and bb_left == bb_right:
# We have a single selection
tab = self.grid.current_table
result = self.grid.code_array[bb_top, bb_left, tab]
if isinstance(result, wx._gdi.Bitmap):
# The result is a wx.Bitmap. Return it.
return result
elif Figure is not None and isinstance(result, Figure):
# The result is a matplotlib figure
# Therefore, a wx.Bitmap is returned
key = bb_top, bb_left, tab
rect = self.grid.CellToRect(bb_top, bb_left)
merged_rect = self.grid.grid_renderer.get_merged_rect(
self.grid, key, rect)
dpi = float(wx.ScreenDC().GetPPI()[0])
zoom = self.grid.grid_renderer.zoom
return fig2bmp(result, merged_rect.width, merged_rect.height,
dpi, zoom)
# So we have result strings to be returned
getter = self._get_result_string
return self.copy(selection, getter=getter) | Returns result
If selection consists of one cell only and result is a bitmap then
the bitmap is returned.
Otherwise the method returns string representations of the result
for the given selection in a tab separated string. | Below is the the instruction that describes the task:
### Input:
Returns result
If selection consists of one cell only and result is a bitmap then
the bitmap is returned.
Otherwise the method returns string representations of the result
for the given selection in a tab separated string.
### Response:
def copy_result(self, selection):
"""Returns result
If selection consists of one cell only and result is a bitmap then
the bitmap is returned.
Otherwise the method returns string representations of the result
for the given selection in a tab separated string.
"""
bbox = selection.get_bbox()
if not bbox:
# There is no selection
bb_top, bb_left = self.grid.actions.cursor[:2]
bb_bottom, bb_right = bb_top, bb_left
else:
        # There is a selection
(bb_top, bb_left), (bb_bottom, bb_right) = bbox
if bb_top == bb_bottom and bb_left == bb_right:
# We have a single selection
tab = self.grid.current_table
result = self.grid.code_array[bb_top, bb_left, tab]
if isinstance(result, wx._gdi.Bitmap):
# The result is a wx.Bitmap. Return it.
return result
elif Figure is not None and isinstance(result, Figure):
# The result is a matplotlib figure
# Therefore, a wx.Bitmap is returned
key = bb_top, bb_left, tab
rect = self.grid.CellToRect(bb_top, bb_left)
merged_rect = self.grid.grid_renderer.get_merged_rect(
self.grid, key, rect)
dpi = float(wx.ScreenDC().GetPPI()[0])
zoom = self.grid.grid_renderer.zoom
return fig2bmp(result, merged_rect.width, merged_rect.height,
dpi, zoom)
# So we have result strings to be returned
getter = self._get_result_string
return self.copy(selection, getter=getter) |
def _set_openflow_interface_cfg(self, v, load=False):
"""
Setter method for openflow_interface_cfg, mapped from YANG variable /interface/hundredgigabitethernet/openflow_interface_cfg (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_openflow_interface_cfg is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_openflow_interface_cfg() directly.
YANG Description: OpenFlow configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=openflow_interface_cfg.openflow_interface_cfg, is_container='container', presence=False, yang_name="openflow-interface-cfg", rest_name="openflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow configuration', u'callpoint': u'OpenFlowPhyInterfaceCallpoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'sort-priority': u'108', u'alt-name': u'openflow'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """openflow_interface_cfg must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=openflow_interface_cfg.openflow_interface_cfg, is_container='container', presence=False, yang_name="openflow-interface-cfg", rest_name="openflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow configuration', u'callpoint': u'OpenFlowPhyInterfaceCallpoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'sort-priority': u'108', u'alt-name': u'openflow'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""",
})
self.__openflow_interface_cfg = t
if hasattr(self, '_set'):
self._set() | Setter method for openflow_interface_cfg, mapped from YANG variable /interface/hundredgigabitethernet/openflow_interface_cfg (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_openflow_interface_cfg is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_openflow_interface_cfg() directly.
YANG Description: OpenFlow configuration. | Below is the the instruction that describes the task:
### Input:
Setter method for openflow_interface_cfg, mapped from YANG variable /interface/hundredgigabitethernet/openflow_interface_cfg (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_openflow_interface_cfg is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_openflow_interface_cfg() directly.
YANG Description: OpenFlow configuration.
### Response:
def _set_openflow_interface_cfg(self, v, load=False):
"""
Setter method for openflow_interface_cfg, mapped from YANG variable /interface/hundredgigabitethernet/openflow_interface_cfg (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_openflow_interface_cfg is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_openflow_interface_cfg() directly.
YANG Description: OpenFlow configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=openflow_interface_cfg.openflow_interface_cfg, is_container='container', presence=False, yang_name="openflow-interface-cfg", rest_name="openflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow configuration', u'callpoint': u'OpenFlowPhyInterfaceCallpoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'sort-priority': u'108', u'alt-name': u'openflow'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """openflow_interface_cfg must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=openflow_interface_cfg.openflow_interface_cfg, is_container='container', presence=False, yang_name="openflow-interface-cfg", rest_name="openflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow configuration', u'callpoint': u'OpenFlowPhyInterfaceCallpoint', u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'sort-priority': u'108', u'alt-name': u'openflow'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)""",
})
self.__openflow_interface_cfg = t
if hasattr(self, '_set'):
self._set() |
def register_method(func, name=None, deprecated=False):
"""Register a method of calculating an average spectrogram.
Parameters
----------
func : `callable`
function to execute
name : `str`, optional
name of the method, defaults to ``func.__name__``
deprecated : `bool`, optional
whether this method is deprecated (`True`) or not (`False`)
Returns
-------
name : `str`
the registered name of the function, which may differ
pedantically from what was given by the user.
"""
# warn about deprecated functions
if deprecated:
func = deprecated_function(
func,
"the {0!r} PSD methods is deprecated, and will be removed "
"in a future release, please consider using {1!r} instead".format(
name, name.split('-', 1)[1],
),
)
if name is None:
name = func.__name__
name = _format_name(name)
METHODS[name] = func
return name | Register a method of calculating an average spectrogram.
Parameters
----------
func : `callable`
function to execute
name : `str`, optional
name of the method, defaults to ``func.__name__``
deprecated : `bool`, optional
whether this method is deprecated (`True`) or not (`False`)
Returns
-------
name : `str`
the registered name of the function, which may differ
pedantically from what was given by the user. | Below is the the instruction that describes the task:
### Input:
Register a method of calculating an average spectrogram.
Parameters
----------
func : `callable`
function to execute
name : `str`, optional
name of the method, defaults to ``func.__name__``
deprecated : `bool`, optional
whether this method is deprecated (`True`) or not (`False`)
Returns
-------
name : `str`
the registered name of the function, which may differ
pedantically from what was given by the user.
### Response:
def register_method(func, name=None, deprecated=False):
"""Register a method of calculating an average spectrogram.
Parameters
----------
func : `callable`
function to execute
name : `str`, optional
name of the method, defaults to ``func.__name__``
deprecated : `bool`, optional
whether this method is deprecated (`True`) or not (`False`)
Returns
-------
name : `str`
the registered name of the function, which may differ
pedantically from what was given by the user.
"""
# warn about deprecated functions
if deprecated:
func = deprecated_function(
func,
"the {0!r} PSD methods is deprecated, and will be removed "
"in a future release, please consider using {1!r} instead".format(
name, name.split('-', 1)[1],
),
)
if name is None:
name = func.__name__
name = _format_name(name)
METHODS[name] = func
return name |
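A hedged sketch of registering a custom averaging method with the registry above; the callable's real signature depends on the PSD code that eventually calls it and is left unspecified here:
def median_mean(*args, **kwargs):
    # placeholder body; a real implementation would return an averaged spectrum
    raise NotImplementedError
key = register_method(median_mean)   # the registered name is normalised by _format_name
assert METHODS[key] is median_mean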
def close(self):
"""Close the file. Can be called multiple times."""
if not self.closed:
# be sure to flush data to disk before closing the file
self.flush()
err = _snd.sf_close(self._file)
self._file = None
_error_check(err) | Close the file. Can be called multiple times. | Below is the the instruction that describes the task:
### Input:
Close the file. Can be called multiple times.
### Response:
def close(self):
"""Close the file. Can be called multiple times."""
if not self.closed:
# be sure to flush data to disk before closing the file
self.flush()
err = _snd.sf_close(self._file)
self._file = None
_error_check(err) |
def get_and_update_setting(self, name, default=None, user=True):
'''Look for a setting in the environment (first priority) and then
the settings file (second). If something is found, the settings
file is updated. The order of operations works as follows:
1. The user config file is used as a cache for the variable
2. the environment variable always takes priority to cache, and if
found, will update the cache.
3. If the variable is not found and the cache is set, we are good
    4. If the variable is not found and the cache isn't set, return
default (default is None)
So the user of the function can assume a return of None equates to
not set anywhere, and take the appropriate action.
'''
setting = self._get_setting(name, user=user)
if setting is None and default is not None:
setting = default
# If the setting is found, update the client secrets
if setting is not None:
updates = {name : setting}
self._update_settings(updates)
return setting | Look for a setting in the environment (first priority) and then
the settings file (second). If something is found, the settings
file is updated. The order of operations works as follows:
1. The user config file is used as a cache for the variable
2. the environment variable always takes priority to cache, and if
found, will update the cache.
3. If the variable is not found and the cache is set, we are good
    4. If the variable is not found and the cache isn't set, return
default (default is None)
So the user of the function can assume a return of None equates to
not set anywhere, and take the appropriate action. | Below is the the instruction that describes the task:
### Input:
Look for a setting in the environment (first priority) and then
the settings file (second). If something is found, the settings
file is updated. The order of operations works as follows:
1. The user config file is used as a cache for the variable
2. the environment variable always takes priority to cache, and if
found, will update the cache.
3. If the variable is not found and the cache is set, we are good
    4. If the variable is not found and the cache isn't set, return
default (default is None)
So the user of the function can assume a return of None equates to
not set anywhere, and take the appropriate action.
### Response:
def get_and_update_setting(self, name, default=None, user=True):
'''Look for a setting in the environment (first priority) and then
the settings file (second). If something is found, the settings
file is updated. The order of operations works as follows:
1. The user config file is used as a cache for the variable
2. the environment variable always takes priority to cache, and if
found, will update the cache.
3. If the variable is not found and the cache is set, we are good
    4. If the variable is not found and the cache isn't set, return
default (default is None)
So the user of the function can assume a return of None equates to
not set anywhere, and take the appropriate action.
'''
setting = self._get_setting(name, user=user)
if setting is None and default is not None:
setting = default
# If the setting is found, update the client secrets
if setting is not None:
updates = {name : setting}
self._update_settings(updates)
return setting |
def compute_neighbors(
self,
n_neighbors: int = 30,
knn: bool = True,
n_pcs: Optional[int] = None,
use_rep: Optional[str] = None,
method: str = 'umap',
random_state: Optional[Union[RandomState, int]] = 0,
write_knn_indices: bool = False,
metric: str = 'euclidean',
metric_kwds: Mapping[str, Any] = {}
) -> None:
"""\
Compute distances and connectivities of neighbors.
Parameters
----------
n_neighbors
Use this number of nearest neighbors.
knn
Restrict result to `n_neighbors` nearest neighbors.
{n_pcs}
{use_rep}
Returns
-------
Writes sparse graph attributes `.distances` and `.connectivities`.
Also writes `.knn_indices` and `.knn_distances` if
`write_knn_indices==True`.
"""
if n_neighbors > self._adata.shape[0]: # very small datasets
n_neighbors = 1 + int(0.5*self._adata.shape[0])
logg.warn('n_obs too small: adjusting to `n_neighbors = {}`'
.format(n_neighbors))
if method == 'umap' and not knn:
raise ValueError('`method = \'umap\' only with `knn = True`.')
if method not in {'umap', 'gauss'}:
raise ValueError('`method` needs to be \'umap\' or \'gauss\'.')
if self._adata.shape[0] >= 10000 and not knn:
logg.warn(
'Using high n_obs without `knn=True` takes a lot of memory...')
self.n_neighbors = n_neighbors
self.knn = knn
X = choose_representation(self._adata, use_rep=use_rep, n_pcs=n_pcs)
# neighbor search
use_dense_distances = (metric == 'euclidean' and X.shape[0] < 8192) or knn == False
if use_dense_distances:
_distances = pairwise_distances(X, metric=metric, **metric_kwds)
knn_indices, knn_distances = get_indices_distances_from_dense_matrix(
_distances, n_neighbors)
if knn:
self._distances = get_sparse_matrix_from_indices_distances_numpy(
knn_indices, knn_distances, X.shape[0], n_neighbors)
else:
self._distances = _distances
else:
# non-euclidean case and approx nearest neighbors
if X.shape[0] < 4096:
X = pairwise_distances(X, metric=metric, **metric_kwds)
metric = 'precomputed'
knn_indices, knn_distances, _ = compute_neighbors_umap(
X, n_neighbors, random_state, metric=metric, metric_kwds=metric_kwds)
#self._rp_forest = _make_forest_dict(forest)
# write indices as attributes
if write_knn_indices:
self.knn_indices = knn_indices
self.knn_distances = knn_distances
logg.msg('computed neighbors', t=True, v=4)
if not use_dense_distances or method == 'umap':
# we need self._distances also for method == 'gauss' if we didn't
# use dense distances
self._distances, self._connectivities = compute_connectivities_umap(
knn_indices, knn_distances, self._adata.shape[0], self.n_neighbors)
# overwrite the umap connectivities if method is 'gauss'
# self._distances is unaffected by this
if method == 'gauss':
self._compute_connectivities_diffmap()
logg.msg('computed connectivities', t=True, v=4)
self._number_connected_components = 1
if issparse(self._connectivities):
from scipy.sparse.csgraph import connected_components
self._connected_components = connected_components(self._connectivities)
self._number_connected_components = self._connected_components[0] | \
Compute distances and connectivities of neighbors.
Parameters
----------
n_neighbors
Use this number of nearest neighbors.
knn
Restrict result to `n_neighbors` nearest neighbors.
{n_pcs}
{use_rep}
Returns
-------
Writes sparse graph attributes `.distances` and `.connectivities`.
Also writes `.knn_indices` and `.knn_distances` if
`write_knn_indices==True`. | Below is the the instruction that describes the task:
### Input:
\
Compute distances and connectivities of neighbors.
Parameters
----------
n_neighbors
Use this number of nearest neighbors.
knn
Restrict result to `n_neighbors` nearest neighbors.
{n_pcs}
{use_rep}
Returns
-------
Writes sparse graph attributes `.distances` and `.connectivities`.
Also writes `.knn_indices` and `.knn_distances` if
`write_knn_indices==True`.
### Response:
def compute_neighbors(
self,
n_neighbors: int = 30,
knn: bool = True,
n_pcs: Optional[int] = None,
use_rep: Optional[str] = None,
method: str = 'umap',
random_state: Optional[Union[RandomState, int]] = 0,
write_knn_indices: bool = False,
metric: str = 'euclidean',
metric_kwds: Mapping[str, Any] = {}
) -> None:
"""\
Compute distances and connectivities of neighbors.
Parameters
----------
n_neighbors
Use this number of nearest neighbors.
knn
Restrict result to `n_neighbors` nearest neighbors.
{n_pcs}
{use_rep}
Returns
-------
Writes sparse graph attributes `.distances` and `.connectivities`.
Also writes `.knn_indices` and `.knn_distances` if
`write_knn_indices==True`.
"""
if n_neighbors > self._adata.shape[0]: # very small datasets
n_neighbors = 1 + int(0.5*self._adata.shape[0])
logg.warn('n_obs too small: adjusting to `n_neighbors = {}`'
.format(n_neighbors))
if method == 'umap' and not knn:
raise ValueError('`method = \'umap\' only with `knn = True`.')
if method not in {'umap', 'gauss'}:
raise ValueError('`method` needs to be \'umap\' or \'gauss\'.')
if self._adata.shape[0] >= 10000 and not knn:
logg.warn(
'Using high n_obs without `knn=True` takes a lot of memory...')
self.n_neighbors = n_neighbors
self.knn = knn
X = choose_representation(self._adata, use_rep=use_rep, n_pcs=n_pcs)
# neighbor search
use_dense_distances = (metric == 'euclidean' and X.shape[0] < 8192) or knn == False
if use_dense_distances:
_distances = pairwise_distances(X, metric=metric, **metric_kwds)
knn_indices, knn_distances = get_indices_distances_from_dense_matrix(
_distances, n_neighbors)
if knn:
self._distances = get_sparse_matrix_from_indices_distances_numpy(
knn_indices, knn_distances, X.shape[0], n_neighbors)
else:
self._distances = _distances
else:
# non-euclidean case and approx nearest neighbors
if X.shape[0] < 4096:
X = pairwise_distances(X, metric=metric, **metric_kwds)
metric = 'precomputed'
knn_indices, knn_distances, _ = compute_neighbors_umap(
X, n_neighbors, random_state, metric=metric, metric_kwds=metric_kwds)
#self._rp_forest = _make_forest_dict(forest)
# write indices as attributes
if write_knn_indices:
self.knn_indices = knn_indices
self.knn_distances = knn_distances
logg.msg('computed neighbors', t=True, v=4)
if not use_dense_distances or method == 'umap':
# we need self._distances also for method == 'gauss' if we didn't
# use dense distances
self._distances, self._connectivities = compute_connectivities_umap(
knn_indices, knn_distances, self._adata.shape[0], self.n_neighbors)
# overwrite the umap connectivities if method is 'gauss'
# self._distances is unaffected by this
if method == 'gauss':
self._compute_connectivities_diffmap()
logg.msg('computed connectivities', t=True, v=4)
self._number_connected_components = 1
if issparse(self._connectivities):
from scipy.sparse.csgraph import connected_components
self._connected_components = connected_components(self._connectivities)
self._number_connected_components = self._connected_components[0] |
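A hedged usage sketch; the Neighbors(adata) constructor and the random AnnData object are assumptions made for illustration:
import numpy as np
import anndata
adata = anndata.AnnData(np.random.rand(100, 50).astype(np.float32))  # 100 obs x 50 vars
neighbors = Neighbors(adata)  # the class that owns compute_neighbors
neighbors.compute_neighbors(n_neighbors=15, method='umap')
# the sparse kNN graphs written by the method are then available:
print(neighbors._distances.shape, neighbors._connectivities.shape)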
def plot_grouped_gos(self, fout_img=None, exclude_hdrs=None, **kws_usr):
"""One Plot containing all user GOs (yellow or green) and header GO IDs(green or purple)."""
# kws_plt -> go2color go2bordercolor
kws_plt, kws_dag = self._get_kws_plt(self.grprobj.usrgos, **kws_usr)
pltgosusr = self.grprobj.usrgos
if exclude_hdrs is not None:
pltgosusr = pltgosusr.difference(self.grprobj.get_usrgos_g_hdrgos(exclude_hdrs))
if fout_img is None:
fout_img = "{GRP_NAME}.png".format(GRP_NAME=self.grprobj.grpname)
# Split one plot into potentially three (BP, MF, CC) if png filename contains '{NS}'
if '{NS}' in fout_img:
go2nt = self.grprobj.gosubdag.get_go2nt(pltgosusr)
for namespace in ['BP', 'MF', 'CC']:
pltgos_ns = [go for go in pltgosusr if go2nt[go].NS == namespace]
if pltgos_ns:
png = fout_img.format(NS=namespace)
self._plot_grouped_gos(png, pltgos_ns, kws_plt, kws_dag)
# Plot all user GO IDs into a single plot, regardless of their namespace
else:
self._plot_grouped_gos(fout_img, pltgosusr, kws_plt, kws_dag) | One Plot containing all user GOs (yellow or green) and header GO IDs(green or purple). | Below is the the instruction that describes the task:
### Input:
One Plot containing all user GOs (yellow or green) and header GO IDs(green or purple).
### Response:
def plot_grouped_gos(self, fout_img=None, exclude_hdrs=None, **kws_usr):
"""One Plot containing all user GOs (yellow or green) and header GO IDs(green or purple)."""
# kws_plt -> go2color go2bordercolor
kws_plt, kws_dag = self._get_kws_plt(self.grprobj.usrgos, **kws_usr)
pltgosusr = self.grprobj.usrgos
if exclude_hdrs is not None:
pltgosusr = pltgosusr.difference(self.grprobj.get_usrgos_g_hdrgos(exclude_hdrs))
if fout_img is None:
fout_img = "{GRP_NAME}.png".format(GRP_NAME=self.grprobj.grpname)
# Split one plot into potentially three (BP, MF, CC) if png filename contains '{NS}'
if '{NS}' in fout_img:
go2nt = self.grprobj.gosubdag.get_go2nt(pltgosusr)
for namespace in ['BP', 'MF', 'CC']:
pltgos_ns = [go for go in pltgosusr if go2nt[go].NS == namespace]
if pltgos_ns:
png = fout_img.format(NS=namespace)
self._plot_grouped_gos(png, pltgos_ns, kws_plt, kws_dag)
# Plot all user GO IDs into a single plot, regardless of their namespace
else:
self._plot_grouped_gos(fout_img, pltgosusr, kws_plt, kws_dag) |
def gauss_hermite_nodes(orders, sigma, mu=None):
'''
Computes the weights and nodes for Gauss Hermite quadrature.
Parameters
----------
orders : int, list, array
The order of integration used in the quadrature routine
sigma : array-like
If one dimensional, the variance of the normal distribution being
approximated. If multidimensional, the variance-covariance matrix of
the multivariate normal process being approximated.
Returns
-------
x : array
Quadrature nodes
w : array
Quadrature weights
'''
if isinstance(orders, int):
orders = [orders]
import numpy
if mu is None:
mu = numpy.array( [0]*sigma.shape[0] )
herms = [hermgauss(i) for i in orders]
points = [ h[0]*numpy.sqrt(2) for h in herms]
weights = [ h[1]/numpy.sqrt( numpy.pi) for h in herms]
if len(orders) == 1:
# Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1.
# print(points.shape)
x = numpy.array(points[0])*numpy.sqrt(float(sigma))
if sigma.ndim==2:
x = x[:,None]
w = weights[0]
return [x,w]
else:
x = cartesian( points).T
from functools import reduce
w = reduce( numpy.kron, weights)
zero_columns = numpy.where(sigma.sum(axis=0)==0)[0]
for i in zero_columns:
sigma[i,i] = 1.0
C = numpy.linalg.cholesky(sigma)
x = numpy.dot(C, x) + mu[:,numpy.newaxis]
x = numpy.ascontiguousarray(x.T)
for i in zero_columns:
x[:,i] =0
return [x,w] | Computes the weights and nodes for Gauss Hermite quadrature.
Parameters
----------
orders : int, list, array
The order of integration used in the quadrature routine
sigma : array-like
If one dimensional, the variance of the normal distribution being
approximated. If multidimensional, the variance-covariance matrix of
the multivariate normal process being approximated.
Returns
-------
x : array
Quadrature nodes
w : array
Quadrature weights | Below is the the instruction that describes the task:
### Input:
Computes the weights and nodes for Gauss Hermite quadrature.
Parameters
----------
orders : int, list, array
The order of integration used in the quadrature routine
sigma : array-like
If one dimensional, the variance of the normal distribution being
approximated. If multidimensional, the variance-covariance matrix of
the multivariate normal process being approximated.
Returns
-------
x : array
Quadrature nodes
w : array
Quadrature weights
### Response:
def gauss_hermite_nodes(orders, sigma, mu=None):
'''
Computes the weights and nodes for Gauss Hermite quadrature.
Parameters
----------
orders : int, list, array
The order of integration used in the quadrature routine
sigma : array-like
If one dimensional, the variance of the normal distribution being
approximated. If multidimensional, the variance-covariance matrix of
the multivariate normal process being approximated.
Returns
-------
x : array
Quadrature nodes
w : array
Quadrature weights
'''
if isinstance(orders, int):
orders = [orders]
import numpy
if mu is None:
mu = numpy.array( [0]*sigma.shape[0] )
herms = [hermgauss(i) for i in orders]
points = [ h[0]*numpy.sqrt(2) for h in herms]
weights = [ h[1]/numpy.sqrt( numpy.pi) for h in herms]
if len(orders) == 1:
# Note: if sigma is 2D, x will always be 2D, even if sigma is only 1x1.
# print(points.shape)
x = numpy.array(points[0])*numpy.sqrt(float(sigma))
if sigma.ndim==2:
x = x[:,None]
w = weights[0]
return [x,w]
else:
x = cartesian( points).T
from functools import reduce
w = reduce( numpy.kron, weights)
zero_columns = numpy.where(sigma.sum(axis=0)==0)[0]
for i in zero_columns:
sigma[i,i] = 1.0
C = numpy.linalg.cholesky(sigma)
x = numpy.dot(C, x) + mu[:,numpy.newaxis]
x = numpy.ascontiguousarray(x.T)
for i in zero_columns:
x[:,i] =0
return [x,w] |
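A small numerical check of the quadrature rule, assuming the helpers it relies on (hermgauss, cartesian) are importable alongside the function; with five nodes the weights reproduce the second moment of a zero-mean normal exactly:
import numpy as np
sigma = np.array([[4.0]])                  # variance 4
x, w = gauss_hermite_nodes(5, sigma)
second_moment = np.sum(w * x[:, 0] ** 2)   # quadrature estimate of E[X^2]
print(second_moment)                       # ~ 4.0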
def _ReverseHostname(self, hostname):
"""Reverses the hostname and strips the leading dot.
The hostname entry is reversed:
moc.elgoog.www.
Should be:
www.google.com
Args:
hostname (str): reversed hostname.
Returns:
str: hostname without a leading dot.
"""
if not hostname:
return ''
if len(hostname) <= 1:
return hostname
if hostname[-1] == '.':
return hostname[::-1][1:]
return hostname[::-1][0:] | Reverses the hostname and strips the leading dot.
The hostname entry is reversed:
moc.elgoog.www.
Should be:
www.google.com
Args:
hostname (str): reversed hostname.
Returns:
str: hostname without a leading dot. | Below is the the instruction that describes the task:
### Input:
Reverses the hostname and strips the leading dot.
The hostname entry is reversed:
moc.elgoog.www.
Should be:
www.google.com
Args:
hostname (str): reversed hostname.
Returns:
str: hostname without a leading dot.
### Response:
def _ReverseHostname(self, hostname):
"""Reverses the hostname and strips the leading dot.
The hostname entry is reversed:
moc.elgoog.www.
Should be:
www.google.com
Args:
hostname (str): reversed hostname.
Returns:
str: hostname without a leading dot.
"""
if not hostname:
return ''
if len(hostname) <= 1:
return hostname
if hostname[-1] == '.':
return hostname[::-1][1:]
return hostname[::-1][0:] |
def spawn_container(addr, env_cls=Environment,
mgr_cls=EnvManager, set_seed=True, *args, **kwargs):
"""Spawn a new environment in a given address as a coroutine.
Arguments and keyword arguments are passed down to the created environment
at initialization time.
If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is
installed, this function renames the title of the process to start with
'creamas' so that the process is easily identifiable, e.g. with
``ps -x | grep creamas``.
"""
# Try setting the process name to easily recognize the spawned
# environments with 'ps -x' or 'top'
try:
import setproctitle as spt
title = 'creamas: {}({})'.format(env_cls.__class__.__name__,
_get_base_url(addr))
spt.setproctitle(title)
except:
pass
if set_seed:
_set_random_seeds()
# kwargs['codec'] = aiomas.MsgPack
task = start(addr, env_cls, mgr_cls, *args, **kwargs)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(task) | Spawn a new environment in a given address as a coroutine.
Arguments and keyword arguments are passed down to the created environment
at initialization time.
If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is
installed, this function renames the title of the process to start with
'creamas' so that the process is easily identifiable, e.g. with
``ps -x | grep creamas``. | Below is the the instruction that describes the task:
### Input:
Spawn a new environment in a given address as a coroutine.
Arguments and keyword arguments are passed down to the created environment
at initialization time.
If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is
installed, this function renames the title of the process to start with
'creamas' so that the process is easily identifiable, e.g. with
``ps -x | grep creamas``.
### Response:
def spawn_container(addr, env_cls=Environment,
mgr_cls=EnvManager, set_seed=True, *args, **kwargs):
"""Spawn a new environment in a given address as a coroutine.
Arguments and keyword arguments are passed down to the created environment
at initialization time.
If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is
installed, this function renames the title of the process to start with
'creamas' so that the process is easily identifiable, e.g. with
``ps -x | grep creamas``.
"""
# Try setting the process name to easily recognize the spawned
# environments with 'ps -x' or 'top'
try:
import setproctitle as spt
title = 'creamas: {}({})'.format(env_cls.__class__.__name__,
_get_base_url(addr))
spt.setproctitle(title)
except:
pass
if set_seed:
_set_random_seeds()
# kwargs['codec'] = aiomas.MsgPack
task = start(addr, env_cls, mgr_cls, *args, **kwargs)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(task) |
def log(self, timer_name, node):
''' logs a event in the timer '''
timestamp = time.time()
if hasattr(self, timer_name):
getattr(self, timer_name).append({
"node":node,
"time":timestamp})
else:
setattr(self, timer_name, [{"node":node, "time":timestamp}]) | logs a event in the timer | Below is the the instruction that describes the task:
### Input:
logs an event in the timer
### Response:
def log(self, timer_name, node):
''' logs a event in the timer '''
timestamp = time.time()
if hasattr(self, timer_name):
getattr(self, timer_name).append({
"node":node,
"time":timestamp})
else:
setattr(self, timer_name, [{"node":node, "time":timestamp}]) |
def validate_state_locations(self):
"""
Names of all state locations must be unique.
"""
names = map(lambda loc: loc["name"], self.locations)
assert len(names) == len(set(names)), "Names of state locations must be unique" | Names of all state locations must be unique. | Below is the the instruction that describes the task:
### Input:
Names of all state locations must be unique.
### Response:
def validate_state_locations(self):
"""
Names of all state locations must be unique.
"""
names = map(lambda loc: loc["name"], self.locations)
assert len(names) == len(set(names)), "Names of state locations must be unique" |
def lineReceived(self, line):
"""
Called when a line is received.
We expect a length in bytes or an empty line for keep-alive. If
we got a length, switch to raw mode to receive that amount of bytes.
"""
if line and line.isdigit():
self._expectedLength = int(line)
self._rawBuffer = []
self._rawBufferLength = 0
self.setRawMode()
else:
self.keepAliveReceived() | Called when a line is received.
We expect a length in bytes or an empty line for keep-alive. If
we got a length, switch to raw mode to receive that amount of bytes. | Below is the the instruction that describes the task:
### Input:
Called when a line is received.
We expect a length in bytes or an empty line for keep-alive. If
we got a length, switch to raw mode to receive that amount of bytes.
### Response:
def lineReceived(self, line):
"""
Called when a line is received.
We expect a length in bytes or an empty line for keep-alive. If
we got a length, switch to raw mode to receive that amount of bytes.
"""
if line and line.isdigit():
self._expectedLength = int(line)
self._rawBuffer = []
self._rawBufferLength = 0
self.setRawMode()
else:
self.keepAliveReceived() |
async def send(self, parameters: RTCRtpSendParameters):
"""
Attempt to set the parameters controlling the sending of media.
:param: parameters: The :class:`RTCRtpParameters` for the sender.
"""
if not self.__started:
self.__cname = parameters.rtcp.cname
self.__mid = parameters.muxId
# make note of the RTP header extension IDs
self.__transport._register_rtp_sender(self, parameters)
self.__rtp_header_extensions_map.configure(parameters)
# make note of RTX payload type
for codec in parameters.codecs:
if is_rtx(codec) and codec.parameters['apt'] == parameters.codecs[0].payloadType:
self.__rtx_payload_type = codec.payloadType
break
self.__rtp_task = asyncio.ensure_future(self._run_rtp(parameters.codecs[0]))
self.__rtcp_task = asyncio.ensure_future(self._run_rtcp())
self.__started = True | Attempt to set the parameters controlling the sending of media.
:param: parameters: The :class:`RTCRtpParameters` for the sender. | Below is the the instruction that describes the task:
### Input:
Attempt to set the parameters controlling the sending of media.
:param: parameters: The :class:`RTCRtpParameters` for the sender.
### Response:
async def send(self, parameters: RTCRtpSendParameters):
"""
Attempt to set the parameters controlling the sending of media.
:param: parameters: The :class:`RTCRtpParameters` for the sender.
"""
if not self.__started:
self.__cname = parameters.rtcp.cname
self.__mid = parameters.muxId
# make note of the RTP header extension IDs
self.__transport._register_rtp_sender(self, parameters)
self.__rtp_header_extensions_map.configure(parameters)
# make note of RTX payload type
for codec in parameters.codecs:
if is_rtx(codec) and codec.parameters['apt'] == parameters.codecs[0].payloadType:
self.__rtx_payload_type = codec.payloadType
break
self.__rtp_task = asyncio.ensure_future(self._run_rtp(parameters.codecs[0]))
self.__rtcp_task = asyncio.ensure_future(self._run_rtcp())
self.__started = True |
def sum_in_date(x='date', y='net_sales', filter_dict=None, model='WikiItem', app=DEFAULT_APP, sort=True, limit=100000):
"""
Count the number of records for each discrete (categorical) value of a field and return a dict of two lists, the field values and the counts.
FIXME: Tests need models with a date field:
Examples:
>> x, y = sum_in_date(y='net_sales', filter_dict={'model__startswith': 'LC60'}, model='Permission', limit=5, sort=1)
>> len(x) == len(y) == 5
True
>> y[1] >= y[0]
True
"""
sort = sort_prefix(sort)
model = get_model(model, app)
filter_dict = filter_dict or {}
objects = model.objects.filter(**filter_dict)
# only the x values are now in the queryset (datetime information)
objects = objects.values(x)
objects = objects.annotate(y=djmodels.Sum(y))
if sort is not None:
# FIXME: this duplicates the dict of lists sort below
objects = objects.order_by(sort + 'y')
objects = objects.all()
if limit:
objects = objects[:int(limit)]
objects = util.sod_transposed(objects)
if sort is not None:
objects = sorted_dict_of_lists(objects, field_names=['y', x], reverse=bool(sort=='-'))
if not x in objects or not 'y' in objects:
return [], []
else:
return objects[x], objects['y'] | Count the number of records for each discrete (categorical) value of a field and return a dict of two lists, the field values and the counts.
FIXME: Tests need models with a date field:
Examples:
>> x, y = sum_in_date(y='net_sales', filter_dict={'model__startswith': 'LC60'}, model='Permission', limit=5, sort=1)
>> len(x) == len(y) == 5
True
>> y[1] >= y[0]
True | Below is the the instruction that describes the task:
### Input:
Count the number of records for each discrete (categorical) value of a field and return a dict of two lists, the field values and the counts.
FIXME: Tests need models with a date field:
Examples:
>> x, y = sum_in_date(y='net_sales', filter_dict={'model__startswith': 'LC60'}, model='Permission', limit=5, sort=1)
>> len(x) == len(y) == 5
True
>> y[1] >= y[0]
True
### Response:
def sum_in_date(x='date', y='net_sales', filter_dict=None, model='WikiItem', app=DEFAULT_APP, sort=True, limit=100000):
"""
Count the number of records for each discrete (categorical) value of a field and return a dict of two lists, the field values and the counts.
FIXME: Tests need models with a date field:
Examples:
>> x, y = sum_in_date(y='net_sales', filter_dict={'model__startswith': 'LC60'}, model='Permission', limit=5, sort=1)
>> len(x) == len(y) == 5
True
>> y[1] >= y[0]
True
"""
sort = sort_prefix(sort)
model = get_model(model, app)
filter_dict = filter_dict or {}
objects = model.objects.filter(**filter_dict)
# only the x values are now in the queryset (datetime information)
objects = objects.values(x)
objects = objects.annotate(y=djmodels.Sum(y))
if sort is not None:
# FIXME: this duplicates the dict of lists sort below
objects = objects.order_by(sort + 'y')
objects = objects.all()
if limit:
objects = objects[:int(limit)]
objects = util.sod_transposed(objects)
if sort is not None:
objects = sorted_dict_of_lists(objects, field_names=['y', x], reverse=bool(sort=='-'))
if not x in objects or not 'y' in objects:
return [], []
else:
return objects[x], objects['y'] |
def view_dupl_sources(token, dstore):
"""
Show the sources with the same ID and the truly duplicated sources
"""
fields = ['source_id', 'code', 'gidx1', 'gidx2', 'num_ruptures']
dic = group_array(dstore['source_info'].value[fields], 'source_id')
sameid = []
dupl = []
for source_id, group in dic.items():
if len(group) > 1: # same ID sources
sources = []
for rec in group:
geom = dstore['source_geom'][rec['gidx1']:rec['gidx2']]
src = Source(source_id, rec['code'], geom, rec['num_ruptures'])
sources.append(src)
if all_equal(sources):
dupl.append(source_id)
sameid.append(source_id)
if not dupl:
return ''
msg = str(dupl) + '\n'
msg += ('Found %d source(s) with the same ID and %d true duplicate(s)'
% (len(sameid), len(dupl)))
fakedupl = set(sameid) - set(dupl)
if fakedupl:
msg += '\nHere is a fake duplicate: %s' % fakedupl.pop()
return msg | Show the sources with the same ID and the truly duplicated sources | Below is the the instruction that describes the task:
### Input:
Show the sources with the same ID and the truly duplicated sources
### Response:
def view_dupl_sources(token, dstore):
"""
Show the sources with the same ID and the truly duplicated sources
"""
fields = ['source_id', 'code', 'gidx1', 'gidx2', 'num_ruptures']
dic = group_array(dstore['source_info'].value[fields], 'source_id')
sameid = []
dupl = []
for source_id, group in dic.items():
if len(group) > 1: # same ID sources
sources = []
for rec in group:
geom = dstore['source_geom'][rec['gidx1']:rec['gidx2']]
src = Source(source_id, rec['code'], geom, rec['num_ruptures'])
sources.append(src)
if all_equal(sources):
dupl.append(source_id)
sameid.append(source_id)
if not dupl:
return ''
msg = str(dupl) + '\n'
msg += ('Found %d source(s) with the same ID and %d true duplicate(s)'
% (len(sameid), len(dupl)))
fakedupl = set(sameid) - set(dupl)
if fakedupl:
msg += '\nHere is a fake duplicate: %s' % fakedupl.pop()
return msg |
def _defaults(self, keys=None):
"""create an empty record"""
d = {}
keys = self._keys if keys is None else keys
for key in keys:
d[key] = None
return d | create an empty record | Below is the the instruction that describes the task:
### Input:
create an empty record
### Response:
def _defaults(self, keys=None):
"""create an empty record"""
d = {}
keys = self._keys if keys is None else keys
for key in keys:
d[key] = None
return d |
def config_maker(project_name, path):
"""Creates a config file based on the project name"""
with open(skeleton_path("config.py"), "r") as config_source:
config_content = config_source.read()
config_content = config_content.replace("__PROJECT_NAME__", project_name)
with open(path, "w") as config_dest:
config_dest.write(config_content) | Creates a config file based on the project name | Below is the the instruction that describes the task:
### Input:
Creates a config file based on the project name
### Response:
def config_maker(project_name, path):
"""Creates a config file based on the project name"""
with open(skeleton_path("config.py"), "r") as config_source:
config_content = config_source.read()
config_content = config_content.replace("__PROJECT_NAME__", project_name)
with open(path, "w") as config_dest:
config_dest.write(config_content) |
def tail_of_file(filename, n, ansi2html=False):
"""Reads a n lines from f with an offset of offset lines. """
avg_line_length = 74
to_read = n
with open(filename) as f:
while 1:
try:
f.seek(-(avg_line_length * to_read), 2)
except IOError:
# woops. apparently file is smaller than what we want
# to step back, go to the beginning instead
f.seek(0)
pos = f.tell()
lines = f.read().splitlines()
if len(lines) >= to_read or pos == 0:
if ansi2html:
return convertAnsi2html('\n'.join(lines[-to_read:]))
return '\n'.join(lines[-to_read:]) + '\n'
avg_line_length *= 1.3 | Reads a n lines from f with an offset of offset lines. | Below is the the instruction that describes the task:
### Input:
Reads n lines from f with an offset of offset lines.
### Response:
def tail_of_file(filename, n, ansi2html=False):
"""Reads a n lines from f with an offset of offset lines. """
avg_line_length = 74
to_read = n
with open(filename) as f:
while 1:
try:
f.seek(-(avg_line_length * to_read), 2)
except IOError:
# woops. apparently file is smaller than what we want
# to step back, go to the beginning instead
f.seek(0)
pos = f.tell()
lines = f.read().splitlines()
if len(lines) >= to_read or pos == 0:
if ansi2html:
return convertAnsi2html('\n'.join(lines[-to_read:]))
return '\n'.join(lines[-to_read:]) + '\n'
avg_line_length *= 1.3 |
def zone_absent(name, resource_group, connection_auth=None):
'''
.. versionadded:: Fluorine
Ensure a DNS zone does not exist in the resource group.
:param name:
Name of the DNS zone.
:param resource_group:
The resource group assigned to the DNS zone.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
zone = __salt__['azurearm_dns.zone_get'](
name,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' in zone:
ret['result'] = True
ret['comment'] = 'DNS zone {0} was not found.'.format(name)
return ret
elif __opts__['test']:
ret['comment'] = 'DNS zone {0} would be deleted.'.format(name)
ret['result'] = None
ret['changes'] = {
'old': zone,
'new': {},
}
return ret
deleted = __salt__['azurearm_dns.zone_delete'](name, resource_group, **connection_auth)
if deleted:
ret['result'] = True
ret['comment'] = 'DNS zone {0} has been deleted.'.format(name)
ret['changes'] = {
'old': zone,
'new': {}
}
return ret
ret['comment'] = 'Failed to delete DNS zone {0}!'.format(name)
return ret | .. versionadded:: Fluorine
Ensure a DNS zone does not exist in the resource group.
:param name:
Name of the DNS zone.
:param resource_group:
The resource group assigned to the DNS zone.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API. | Below is the the instruction that describes the task:
### Input:
.. versionadded:: Fluorine
Ensure a DNS zone does not exist in the resource group.
:param name:
Name of the DNS zone.
:param resource_group:
The resource group assigned to the DNS zone.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
### Response:
def zone_absent(name, resource_group, connection_auth=None):
'''
.. versionadded:: Fluorine
Ensure a DNS zone does not exist in the resource group.
:param name:
Name of the DNS zone.
:param resource_group:
The resource group assigned to the DNS zone.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
zone = __salt__['azurearm_dns.zone_get'](
name,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' in zone:
ret['result'] = True
ret['comment'] = 'DNS zone {0} was not found.'.format(name)
return ret
elif __opts__['test']:
ret['comment'] = 'DNS zone {0} would be deleted.'.format(name)
ret['result'] = None
ret['changes'] = {
'old': zone,
'new': {},
}
return ret
deleted = __salt__['azurearm_dns.zone_delete'](name, resource_group, **connection_auth)
if deleted:
ret['result'] = True
ret['comment'] = 'DNS zone {0} has been deleted.'.format(name)
ret['changes'] = {
'old': zone,
'new': {}
}
return ret
ret['comment'] = 'Failed to delete DNS zone {0}!'.format(name)
return ret |
def _add_notification_config_to_xml(node, element_name, configs):
"""
Internal function that builds the XML sub-structure for a given
kind of notification configuration.
"""
for config in configs:
config_node = s3_xml.SubElement(node, element_name)
if 'Id' in config:
id_node = s3_xml.SubElement(config_node, 'Id')
id_node.text = config['Id']
arn_node = s3_xml.SubElement(
config_node,
NOTIFICATIONS_ARN_FIELDNAME_MAP[element_name]
)
arn_node.text = config['Arn']
for event in config['Events']:
event_node = s3_xml.SubElement(config_node, 'Event')
event_node.text = event
filter_rules = config.get('Filter', {}).get(
'Key', {}).get('FilterRules', [])
if filter_rules:
filter_node = s3_xml.SubElement(config_node, 'Filter')
s3key_node = s3_xml.SubElement(filter_node, 'S3Key')
for filter_rule in filter_rules:
filter_rule_node = s3_xml.SubElement(s3key_node, 'FilterRule')
name_node = s3_xml.SubElement(filter_rule_node, 'Name')
name_node.text = filter_rule['Name']
value_node = s3_xml.SubElement(filter_rule_node, 'Value')
value_node.text = filter_rule['Value']
return node | Internal function that builds the XML sub-structure for a given
kind of notification configuration. | Below is the the instruction that describes the task:
### Input:
Internal function that builds the XML sub-structure for a given
kind of notification configuration.
### Response:
def _add_notification_config_to_xml(node, element_name, configs):
"""
Internal function that builds the XML sub-structure for a given
kind of notification configuration.
"""
for config in configs:
config_node = s3_xml.SubElement(node, element_name)
if 'Id' in config:
id_node = s3_xml.SubElement(config_node, 'Id')
id_node.text = config['Id']
arn_node = s3_xml.SubElement(
config_node,
NOTIFICATIONS_ARN_FIELDNAME_MAP[element_name]
)
arn_node.text = config['Arn']
for event in config['Events']:
event_node = s3_xml.SubElement(config_node, 'Event')
event_node.text = event
filter_rules = config.get('Filter', {}).get(
'Key', {}).get('FilterRules', [])
if filter_rules:
filter_node = s3_xml.SubElement(config_node, 'Filter')
s3key_node = s3_xml.SubElement(filter_node, 'S3Key')
for filter_rule in filter_rules:
filter_rule_node = s3_xml.SubElement(s3key_node, 'FilterRule')
name_node = s3_xml.SubElement(filter_rule_node, 'Name')
name_node.text = filter_rule['Name']
value_node = s3_xml.SubElement(filter_rule_node, 'Value')
value_node.text = filter_rule['Value']
return node |
def giant_text_sqltype(dialect: Dialect) -> str:
"""
Returns the SQL column type used to make very large text columns for a
given dialect.
Args:
dialect: a SQLAlchemy :class:`Dialect`
Returns:
the SQL data type of "giant text", typically 'LONGTEXT' for MySQL
and 'NVARCHAR(MAX)' for SQL Server.
"""
if dialect.name == SqlaDialectName.SQLSERVER:
return 'NVARCHAR(MAX)'
elif dialect.name == SqlaDialectName.MYSQL:
return 'LONGTEXT'
else:
raise ValueError("Unknown dialect: {}".format(dialect.name)) | Returns the SQL column type used to make very large text columns for a
given dialect.
Args:
dialect: a SQLAlchemy :class:`Dialect`
Returns:
the SQL data type of "giant text", typically 'LONGTEXT' for MySQL
and 'NVARCHAR(MAX)' for SQL Server. | Below is the the instruction that describes the task:
### Input:
Returns the SQL column type used to make very large text columns for a
given dialect.
Args:
dialect: a SQLAlchemy :class:`Dialect`
Returns:
the SQL data type of "giant text", typically 'LONGTEXT' for MySQL
and 'NVARCHAR(MAX)' for SQL Server.
### Response:
def giant_text_sqltype(dialect: Dialect) -> str:
"""
Returns the SQL column type used to make very large text columns for a
given dialect.
Args:
dialect: a SQLAlchemy :class:`Dialect`
Returns:
the SQL data type of "giant text", typically 'LONGTEXT' for MySQL
and 'NVARCHAR(MAX)' for SQL Server.
"""
if dialect.name == SqlaDialectName.SQLSERVER:
return 'NVARCHAR(MAX)'
elif dialect.name == SqlaDialectName.MYSQL:
return 'LONGTEXT'
else:
raise ValueError("Unknown dialect: {}".format(dialect.name)) |
def window(self, window_name):
"""
Switches focus to the specified window.
:Args:
- window_name: The name or window handle of the window to switch to.
:Usage:
::
driver.switch_to.window('main')
"""
if self._driver.w3c:
self._w3c_window(window_name)
return
data = {'name': window_name}
self._driver.execute(Command.SWITCH_TO_WINDOW, data) | Switches focus to the specified window.
:Args:
- window_name: The name or window handle of the window to switch to.
:Usage:
::
driver.switch_to.window('main') | Below is the the instruction that describes the task:
### Input:
Switches focus to the specified window.
:Args:
- window_name: The name or window handle of the window to switch to.
:Usage:
::
driver.switch_to.window('main')
### Response:
def window(self, window_name):
"""
Switches focus to the specified window.
:Args:
- window_name: The name or window handle of the window to switch to.
:Usage:
::
driver.switch_to.window('main')
"""
if self._driver.w3c:
self._w3c_window(window_name)
return
data = {'name': window_name}
self._driver.execute(Command.SWITCH_TO_WINDOW, data) |
def _FetchMostRecentGraphSeriesFromTheLegacyDB(
label,
report_type,
token = None
):
"""Fetches the latest graph-series for a client label from the legacy DB.
Args:
label: Client label to fetch data for.
report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.
token: ACL token to use for reading from the DB.
Raises:
AFF4AttributeTypeError: If an unexpected report-data type is encountered.
Returns:
The graph series for the given label and report type that was last
written to the DB, or None if no series for that label and report-type
exist.
"""
try:
stats_for_label = aff4.FACTORY.Open(
GetAFF4ClientReportsURN().Add(label),
aff4_type=aff4_stats.ClientFleetStats,
mode="r",
token=token)
except aff4.InstantiationError:
# Nothing to return for the given label and report-type.
return None
aff4_attr = _GetAFF4AttributeForReportType(report_type)
graph_series = rdf_stats.ClientGraphSeries(report_type=report_type)
if aff4_attr.attribute_type == rdf_stats.GraphSeries:
graphs = stats_for_label.Get(aff4_attr)
if graphs is None:
return None
for graph in graphs:
graph_series.graphs.Append(graph)
elif aff4_attr.attribute_type == rdf_stats.Graph:
graph = stats_for_label.Get(aff4_attr)
if graph is None:
return None
graph_series.graphs.Append(graph)
else:
raise AFF4AttributeTypeError(aff4_attr.attribute_type)
return graph_series | Fetches the latest graph-series for a client label from the legacy DB.
Args:
label: Client label to fetch data for.
report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.
token: ACL token to use for reading from the DB.
Raises:
AFF4AttributeTypeError: If an unexpected report-data type is encountered.
Returns:
The graph series for the given label and report type that was last
written to the DB, or None if no series for that label and report-type
exist. | Below is the the instruction that describes the task:
### Input:
Fetches the latest graph-series for a client label from the legacy DB.
Args:
label: Client label to fetch data for.
report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.
token: ACL token to use for reading from the DB.
Raises:
AFF4AttributeTypeError: If an unexpected report-data type is encountered.
Returns:
The graph series for the given label and report type that was last
written to the DB, or None if no series for that label and report-type
exist.
### Response:
def _FetchMostRecentGraphSeriesFromTheLegacyDB(
label,
report_type,
token = None
):
"""Fetches the latest graph-series for a client label from the legacy DB.
Args:
label: Client label to fetch data for.
report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for.
token: ACL token to use for reading from the DB.
Raises:
AFF4AttributeTypeError: If an unexpected report-data type is encountered.
Returns:
The graph series for the given label and report type that was last
written to the DB, or None if no series for that label and report-type
exist.
"""
try:
stats_for_label = aff4.FACTORY.Open(
GetAFF4ClientReportsURN().Add(label),
aff4_type=aff4_stats.ClientFleetStats,
mode="r",
token=token)
except aff4.InstantiationError:
# Nothing to return for the given label and report-type.
return None
aff4_attr = _GetAFF4AttributeForReportType(report_type)
graph_series = rdf_stats.ClientGraphSeries(report_type=report_type)
if aff4_attr.attribute_type == rdf_stats.GraphSeries:
graphs = stats_for_label.Get(aff4_attr)
if graphs is None:
return None
for graph in graphs:
graph_series.graphs.Append(graph)
elif aff4_attr.attribute_type == rdf_stats.Graph:
graph = stats_for_label.Get(aff4_attr)
if graph is None:
return None
graph_series.graphs.Append(graph)
else:
raise AFF4AttributeTypeError(aff4_attr.attribute_type)
return graph_series |
def suppress_output(reverse=False):
"""
Suppress output
"""
if reverse:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
else:
sys.stdout = os.devnull
sys.stderr = os.devnull | Suppress output | Below is the the instruction that describes the task:
### Input:
Suppress output
### Response:
def suppress_output(reverse=False):
"""
Suppress output
"""
if reverse:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
else:
sys.stdout = os.devnull
sys.stderr = os.devnull |
def install_python(name, version=None, install_args=None, override_args=False):
'''
Instructs Chocolatey to install a package via Python's easy_install.
name
The name of the package to be installed. Only accepts a single argument.
version
Install a specific version of the package. Defaults to latest version
available.
install_args
A list of install arguments you want to pass to the installation process
i.e product key or feature list
override_args
Set to true if you want to override the original install arguments (for
the native installer) in the package and use your own. When this is set
to False install_args will be appended to the end of the default
arguments
CLI Example:
.. code-block:: bash
salt '*' chocolatey.install_python <package name>
salt '*' chocolatey.install_python <package name> version=<package version>
salt '*' chocolatey.install_python <package name> install_args=<args> override_args=True
'''
return install(name,
version=version,
source='python',
install_args=install_args,
override_args=override_args) | Instructs Chocolatey to install a package via Python's easy_install.
name
The name of the package to be installed. Only accepts a single argument.
version
Install a specific version of the package. Defaults to latest version
available.
install_args
A list of install arguments you want to pass to the installation process
i.e product key or feature list
override_args
Set to true if you want to override the original install arguments (for
the native installer) in the package and use your own. When this is set
to False install_args will be appended to the end of the default
arguments
CLI Example:
.. code-block:: bash
salt '*' chocolatey.install_python <package name>
salt '*' chocolatey.install_python <package name> version=<package version>
salt '*' chocolatey.install_python <package name> install_args=<args> override_args=True | Below is the the instruction that describes the task:
### Input:
Instructs Chocolatey to install a package via Python's easy_install.
name
The name of the package to be installed. Only accepts a single argument.
version
Install a specific version of the package. Defaults to latest version
available.
install_args
A list of install arguments you want to pass to the installation process
i.e product key or feature list
override_args
Set to true if you want to override the original install arguments (for
the native installer) in the package and use your own. When this is set
to False install_args will be appended to the end of the default
arguments
CLI Example:
.. code-block:: bash
salt '*' chocolatey.install_python <package name>
salt '*' chocolatey.install_python <package name> version=<package version>
salt '*' chocolatey.install_python <package name> install_args=<args> override_args=True
### Response:
def install_python(name, version=None, install_args=None, override_args=False):
'''
Instructs Chocolatey to install a package via Python's easy_install.
name
The name of the package to be installed. Only accepts a single argument.
version
Install a specific version of the package. Defaults to latest version
available.
install_args
A list of install arguments you want to pass to the installation process
i.e product key or feature list
override_args
Set to true if you want to override the original install arguments (for
the native installer) in the package and use your own. When this is set
to False install_args will be appended to the end of the default
arguments
CLI Example:
.. code-block:: bash
salt '*' chocolatey.install_python <package name>
salt '*' chocolatey.install_python <package name> version=<package version>
salt '*' chocolatey.install_python <package name> install_args=<args> override_args=True
'''
return install(name,
version=version,
source='python',
install_args=install_args,
override_args=override_args) |