text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
method returns DB cursor based on precise boundaries
<END_TASK>
<USER_TASK:>
Description:
def cursor_fine(self,
table_name,
start_id_obj,
end_id_obj,
iteration,
start_timeperiod,
end_timeperiod):
""" method returns DB cursor based on precise boundaries """ |
raise NotImplementedError('method cursor_fine must be implemented by {0}'.format(self.__class__.__name__)) |
<SYSTEM_TASK:>
method returns batched DB cursor
<END_TASK>
<USER_TASK:>
Description:
def cursor_batch(self,
table_name,
start_timeperiod,
end_timeperiod):
""" method returns batched DB cursor """ |
raise NotImplementedError('method cursor_batch must be implemented by {0}'.format(self.__class__.__name__)) |
<SYSTEM_TASK:>
replaces document identified by the primary_key or creates one if a matching document does not exist
<END_TASK>
<USER_TASK:>
Description:
def update(self, table_name, primary_key, instance):
""" replaces document identified by the primary_key or creates one if a matching document does not exist""" |
assert isinstance(primary_key, dict)
assert isinstance(instance, BaseDocument)
collection = self._db[table_name]
# work with a copy of the document, as the direct type change of the _id field
# is later negated by the `BaseDocument.to_json` method
document = instance.document
if '_id' in document:
document['_id'] = ObjectId(document['_id'])
update_result = collection.replace_one(filter=primary_key, replacement=document, upsert=True)
if update_result.upserted_id:
instance['_id'] = update_result.upserted_id
return update_result.upserted_id |
<SYSTEM_TASK:>
Returns the first occurrence of the effect in your pedalboard
<END_TASK>
<USER_TASK:>
Description:
def index(self):
"""
Returns the first occurrence of the effect in your pedalboard
""" |
if self.pedalboard is None:
raise IndexError('Effect does not contain a pedalboard')
return self.pedalboard.effects.index(self) |
<SYSTEM_TASK:>
Force all models to call full_clean before save
<END_TASK>
<USER_TASK:>
Description:
def pre_save_full_clean_handler(sender, instance, *args, **kwargs):
""" Force all models to call full_clean before save """ |
from django.contrib.sessions.models import Session
if sender != Session:
instance.full_clean() |
<SYSTEM_TASK:>
Load the settings module pointed to by the environment variable. This
<END_TASK>
<USER_TASK:>
Description:
def _setup(self):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
""" |
settings_module = os.environ.get(ENVIRONMENT_SETTINGS_VARIABLE, 'settings')
if not settings_module:
raise ImproperlyConfigured(
'Requested settings module points to an empty variable. '
'You must either define the environment variable {0} '
'or call settings.configure() before accessing the settings.'
.format(ENVIRONMENT_SETTINGS_VARIABLE))
self._wrapped = Settings(settings_module, default_settings=global_settings) |
<SYSTEM_TASK:>
Load the context module pointed to by the environment variable. This
<END_TASK>
<USER_TASK:>
Description:
def _setup(self):
"""
Load the context module pointed to by the environment variable. This
is used the first time we need the context at all, if the user has not
previously configured the context manually.
""" |
context_module = os.environ.get(ENVIRONMENT_CONTEXT_VARIABLE, 'context')
if not context_module:
raise ImproperlyConfigured(
'Requested context points to an empty variable. '
'You must either define the environment variable {0} '
'or call context.configure() before accessing the context.'
.format(ENVIRONMENT_CONTEXT_VARIABLE))
self._wrapped = Settings(context_module, default_settings=global_context) |
<SYSTEM_TASK:>
Reads the json data in path
<END_TASK>
<USER_TASK:>
Description:
def read(path, create_file=False):
"""
Reads the json data in path
:param Path path: Path that the json data will be read from
:param create_file: Creates the file if it doesn't exist
:return: json data
""" |
if create_file:
with open(str(path), 'a+') as data_file:
data_file.seek(0)
return json.load(data_file)
else:
with open(str(path)) as data_file:
return json.load(data_file) |
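A short usage sketch of the reader above, with a temporary path standing in for a real data file; note that with create_file=True a brand-new file is empty, so json.load() would raise on it.

import json
import tempfile
from pathlib import Path

path = Path(tempfile.mkdtemp()) / 'data.json'
path.write_text('{"answer": 42}')

# the create_file=False branch: simply open and parse the existing file
with open(str(path)) as data_file:
    print(json.load(data_file))   # {'answer': 42}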
<SYSTEM_TASK:>
Runs the given function in a subprocess.
<END_TASK>
<USER_TASK:>
Description:
def run_at_subprocess(self, use_subprocess, foo, *args, **kwargs):
"""
Runs the given function in a subprocess.
Very useful when you have a problem with memory leaks.
""" |
if use_subprocess is False:
return foo(*args, **kwargs)
child_pid = os.fork()
if child_pid == 0:
foo(*args, **kwargs)
sys.exit(0)
return os.waitpid(child_pid, 0)[1] == 0 |
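A minimal standalone sketch of the fork/waitpid pattern the method relies on (POSIX-only; run_in_subprocess is a hypothetical helper mirroring the logic above).

import os
import sys

def run_in_subprocess(func, *args, **kwargs):
    child_pid = os.fork()        # duplicate the current process
    if child_pid == 0:           # child branch: do the work, then exit immediately
        func(*args, **kwargs)
        sys.exit(0)
    # parent branch: wait for the child; an exit status of 0 means success
    return os.waitpid(child_pid, 0)[1] == 0

if __name__ == '__main__':
    print('child succeeded:', run_in_subprocess(print, 'work done in child'))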
<SYSTEM_TASK:>
removes all documents in this collection
<END_TASK>
<USER_TASK:>
Description:
def clear(self):
""" removes all documents in this collection """ |
collection = self.ds.connection(COLLECTION_MANAGED_PROCESS)
return collection.delete_many(filter={}) |
<SYSTEM_TASK:>
method traverses compressed file and calculates its MD5 checksum
<END_TASK>
<USER_TASK:>
Description:
def compute_gzip_md5(file_name):
""" method traverses compressed file and calculates its MD5 checksum """ |
md5 = hashlib.md5()
file_obj = gzip.open(file_name, 'rb')
for chunk in iter(lambda: file_obj.read(8192), b''):
md5.update(chunk)
file_obj.close()
return md5.hexdigest() |
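Usage sketch: writes a small gzip file to a temporary location and checksums it with the same read-in-chunks approach (note the b'' sentinel, since gzip.open in 'rb' mode yields bytes).

import gzip
import hashlib
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'sample.gz')
with gzip.open(path, 'wb') as f:
    f.write(b'hello world')

md5 = hashlib.md5()
with gzip.open(path, 'rb') as file_obj:
    for chunk in iter(lambda: file_obj.read(8192), b''):
        md5.update(chunk)
print(md5.hexdigest())   # MD5 of the decompressed payload b'hello world'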
<SYSTEM_TASK:>
method iterates through the source family and copies its entries to the target family
<END_TASK>
<USER_TASK:>
Description:
def copy_and_sum_families(family_source, family_target):
""" methods iterates thru source family and copies its entries to target family
in case key already exists in both families - then the values are added""" |
for every in family_source:
if every not in family_target:
family_target[every] = family_source[every]
else:
family_target[every] += family_source[every] |
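Behaviour sketch with plain dicts, plus the collections.Counter equivalent for purely numeric values; a standalone copy of the function is included so the snippet runs on its own.

from collections import Counter

def copy_and_sum_families(family_source, family_target):
    for key in family_source:
        if key not in family_target:
            family_target[key] = family_source[key]
        else:
            family_target[key] += family_source[key]

target = {'a': 1, 'b': 2}
copy_and_sum_families({'b': 3, 'c': 4}, target)
print(target)                                                  # {'a': 1, 'b': 5, 'c': 4}
print(Counter({'a': 1, 'b': 2}) + Counter({'b': 3, 'c': 4}))   # same sums via Counter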
<SYSTEM_TASK:>
Validates the format of a host entry
<END_TASK>
<USER_TASK:>
Description:
def host_errors(self, hostname, details):
"""
Validates the format of a host entry
Returns an error string, or None if it is valid.
""" |
if not hostname or not isinstance(hostname, basestring):
return "hostname_invalid"
if not isinstance(details, list):
return "host_details_not_list"
if len(details) != 3:
return "host_details_wrong_length"
if details[0] not in self.balancer.action_mapping:
return "host_action_invalid:%s" % details[0]
if not isinstance(details[1], dict):
return "host_kwargs_not_dict"
if not isinstance(details[2], bool):
return "host_match_subdomains_not_bool"
return None |
<SYSTEM_TASK:>
Callback to validate a response code.
<END_TASK>
<USER_TASK:>
Description:
def boolean(cls, true_code, false_code=None):
"""Callback to validate a response code.
The returned callback checks whether a given response has a
``status_code`` that is considered good (``true_code``) and
raise an appropriate error if not.
The optional ``false_code`` allows for a non-successful status
code to return False instead of throwing an error. This is used,
for example in relationship mutation to indicate that the
relationship was not modified.
Args:
true_code(int): The http status code to consider as a success
Keyword Args:
false_code(int): The http status code to consider a failure
Returns:
A function that given a response returns ``True`` if the
response's status code matches the given code. Raises
a :class:`HeliumError` if the response code does not
match.
""" |
def func(response):
if response is not None:
status_code = response.status
if status_code == true_code:
return True
if false_code is not None and status_code == false_code:
return False
raise error_for(response)
return func |
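A self-contained sketch of how such a callback factory behaves; FakeResponse and FakeError are stand-ins for the real response object and the HeliumError/error_for machinery.

class FakeResponse:
    def __init__(self, status):
        self.status = status

class FakeError(Exception):
    pass

def boolean(true_code, false_code=None):
    def func(response):
        if response is not None:
            if response.status == true_code:
                return True
            if false_code is not None and response.status == false_code:
                return False
        raise FakeError('unexpected response: {0}'.format(response))
    return func

check = boolean(200, false_code=404)
print(check(FakeResponse(200)))   # True
print(check(FakeResponse(404)))   # False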
<SYSTEM_TASK:>
Callback to validate and extract a JSON object.
<END_TASK>
<USER_TASK:>
Description:
def json(cls, status_code, process):
"""Callback to validate and extract a JSON object.
The returned callback checks a given response for the given
status_code using :meth:`boolean`. On success the
response JSON is parsed and returned.
Args:
status_code(int): The http status code to consider a success
Returns:
A function that given a response returns the JSON object
in the given response. Raises a :class:`HeliumError` if
the response code does not match.
""" |
def func(response):
ret = None
if cls.boolean(status_code)(response):
ret = response.json() or {}
return process(ret)
return func |
<SYSTEM_TASK:>
Get a URL.
<END_TASK>
<USER_TASK:>
Description:
def get(self, url, callback,
params=None, json=None, headers=None):
"""Get a URL.
Args:
url(string): URL for the request
callback(func): The response callback function
Keyword Args:
params(dict): Parameters for the request
json(dict): JSON body for the request
headers(dict): Additional headers for the request
Returns:
The result of the callback handling the response from the
executed request
""" |
return self.adapter.get(url, callback,
params=params, json=json, headers=headers) |
<SYSTEM_TASK:>
Put to a URL.
<END_TASK>
<USER_TASK:>
Description:
def put(self, url, callback,
params=None, json=None, headers=None):
"""Put to a URL.
Args:
url(string): URL for the request
callback(func): The response callback function
Keyword Args:
params(dict): Parameters for the request
json(dict): JSON body for the request
headers(dict): HTTP headers for the request
Returns:
The result of the callback handling the response from the
executed request
""" |
return self.adapter.put(url, callback, params=params, json=json, headers=headers) |
<SYSTEM_TASK:>
Patch a URL.
<END_TASK>
<USER_TASK:>
Description:
def patch(self, url, callback,
params=None, json=None, headers=None):
"""Patch a URL.
Args:
url(string): URL for the request
callback(func): The response callback function
Keyword Args:
params(dict): Parameters for the request
json(dict): JSON body for the request
headers(dict): HTTP headers for the request
Returns:
The result of the callback handling the response from the
executed request
""" |
return self.adapter.patch(url, callback,
params=params, json=json, headers=headers) |
<SYSTEM_TASK:>
Delete a URL.
<END_TASK>
<USER_TASK:>
Description:
def delete(self, url, callback, json=None):
"""Delete a URL.
Args:
url(string): URL for the request
callback(func): The response callback function
Keyword Args:
json(dict): JSON body for the request
Returns:
The result of the callback handling the response from the
executed request
""" |
return self.adapter.delete(url, callback, json=json) |
<SYSTEM_TASK:>
Get a live endpoint.
<END_TASK>
<USER_TASK:>
Description:
def live(self, url, resource_class, resource_args, params=None):
"""Get a live endpoint.
Args:
url(string): URL for the request
resource_class(class): The class to use for entries coming
from the live endpoint.
resource_args(dict): Additional arguments to pass to the
`resource_class` constructor
Keyword Args:
params(dict): Request parameters for the live url
Returns:
An iterator over the live endpoint. Depending on the
adapter the iterator will allow asynchronous
behavior. The default adapter will block while
iterating over the response of this method.
""" |
return self.adapter.live(self, url, resource_class, resource_args,
params=params) |
<SYSTEM_TASK:>
Return a list of banks present in data_path
<END_TASK>
<USER_TASK:>
Description:
def load(self, system_effect):
"""
Return a list of banks present in data_path
:param SystemEffect system_effect: SystemEffect used in pedalboards
:return list[Bank]: List with Banks persisted in
:attr:`~pluginsmanager.observer.autosaver.banks_files.BanksFiles.data_path`
""" |
persistence = PersistenceDecoder(system_effect)
banks = []
for file in glob(str(self.data_path) + "/*.json"):
bank = persistence.read(Persistence.read(file))
bank._uuid = file.split('/')[-1].split('.json')[0]
banks.append(bank)
return banks |
<SYSTEM_TASK:>
Save the bank to its file
<END_TASK>
<USER_TASK:>
Description:
def save_bank(self, bank):
"""
Save the bank to its file
:param Bank bank: Bank that will be persisted
""" |
path = self._bank_path(bank)
Persistence.save(path, bank.json) |
<SYSTEM_TASK:>
Delete the bank's file
<END_TASK>
<USER_TASK:>
Description:
def delete_bank(self, bank):
"""
Delete the bank's file
:param Bank bank: Bank that will be removed
""" |
path = self._bank_path(bank)
Persistence.delete(path) |
<SYSTEM_TASK:>
Delete all banks files.
<END_TASK>
<USER_TASK:>
Description:
def delete_all_banks(self):
"""
Delete all banks files.
Useful for manual saving, because it isn't possible to know which banks
were removed
""" |
for file in glob(str(self.data_path) + "/*.json"):
Persistence.delete(file) |
<SYSTEM_TASK:>
Extracts audio interfaces data
<END_TASK>
<USER_TASK:>
Description:
def audio_interfaces():
"""
Extracts audio interfaces data
:return list[AudioInterface]: Audio interfaces data
""" |
p = pyaudio.PyAudio()
interfaces = []
for i in range(p.get_device_count()):
data = p.get_device_info_by_index(i)
if 'hw' not in data['name']:
interfaces.append(AudioInterface(data))
p.terminate()
return interfaces |
<SYSTEM_TASK:>
method finds unit_of_work record and change its status
<END_TASK>
<USER_TASK:>
Description:
def update(self, instance):
""" method finds unit_of_work record and change its status""" |
assert isinstance(instance, UnitOfWork)
if instance.db_id:
query = {'_id': ObjectId(instance.db_id)}
else:
query = {unit_of_work.PROCESS_NAME: instance.process_name,
unit_of_work.TIMEPERIOD: instance.timeperiod,
unit_of_work.START_ID: instance.start_id,
unit_of_work.END_ID: instance.end_id}
self.ds.update(COLLECTION_UNIT_OF_WORK, query, instance)
return instance.db_id |
<SYSTEM_TASK:>
method runs the query and returns a list of filtered UnitOfWork records
<END_TASK>
<USER_TASK:>
Description:
def run_query(self, query):
""" method runs the query and returns a list of filtered UnitOfWork records """ |
cursor = self.ds.filter(COLLECTION_UNIT_OF_WORK, query)
return [UnitOfWork.from_json(document) for document in cursor] |
<SYSTEM_TASK:>
method tries to recover from DuplicateKeyError
<END_TASK>
<USER_TASK:>
Description:
def recover_from_duplicatekeyerror(self, e):
""" method tries to recover from DuplicateKeyError """ |
if isinstance(e, DuplicateKeyError):
try:
return self.get_by_params(e.process_name, e.timeperiod, e.start_id, e.end_id)
except LookupError as e:
self.logger.error('Unable to recover from DuplicateKeyError error due to {0}'.format(e), exc_info=True)
else:
msg = 'Unable to recover from DuplicateKeyError due to unspecified UOW primary key'
self.logger.error(msg) |
<SYSTEM_TASK:>
Handle when AutoReconnect is raised from pymongo. This is the standard error
<END_TASK>
<USER_TASK:>
Description:
def with_reconnect(func):
"""
Handle when AutoReconnect is raised from pymongo. This is the standard error
raised for everything from "host disconnected" to "couldn't connect to host"
and more.
The sleep handles the edge case when the state of a replica set changes, and
the cursor raises AutoReconnect because the master may have changed. It can
take some time for the replica set to stop raising this exception, and the
small sleep and iteration count gives us a couple of seconds before we fail
completely.
""" |
from pymongo.errors import AutoReconnect
@functools.wraps(func)
def _reconnector(*args, **kwargs):
for _ in range(20):
try:
return func(*args, **kwargs)
except AutoReconnect:
time.sleep(0.250)
raise
return _reconnector |
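A generic sketch of the same retry-with-sleep pattern, written against ConnectionError so it runs without pymongo installed; the final re-raise is done explicitly via the last caught exception.

import functools
import time

def with_retry(exc_types, attempts=3, delay=0.01):
    def decorator(func):
        @functools.wraps(func)
        def _wrapper(*args, **kwargs):
            last_error = None
            for _ in range(attempts):
                try:
                    return func(*args, **kwargs)
                except exc_types as e:   # remember the failure and retry after a short sleep
                    last_error = e
                    time.sleep(delay)
            raise last_error             # all attempts exhausted
        return _wrapper
    return decorator

@with_retry(ConnectionError)
def flaky():
    raise ConnectionError('host unavailable')

try:
    flaky()
except ConnectionError as e:
    print('gave up after retries:', e)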
<SYSTEM_TASK:>
method clears existing log_recorder entries for given parent_object_id,
<END_TASK>
<USER_TASK:>
Description:
def attach(self):
""" method clears existing log_recorder entries for given parent_object_id,
creates a new one and attaches this handler to the logger
from this moment every log record will be recorded in the DB """ |
log_recording = LogRecording(parent_object_id=self.parent_object_id, created_at=datetime.utcnow())
self.log_recording_dao.remove(self.parent_object_id)
self.log_recording_dao.update(log_recording)
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
self.setFormatter(formatter)
self.logger.addHandler(self) |
<SYSTEM_TASK:>
Handle payment received and respond with a dictionary
<END_TASK>
<USER_TASK:>
Description:
def process_lipisha_payment(request):
"""Handle payment received and respond with a dictionary""" |
log.debug(request.POST)
schema = LipishaInitiateSchema
api_type = request.POST.get('api_type')
if api_type == TYPE_ACKNOWLEDGE:
schema = LipishaAcknowledgeSchema
form = Form(request, schema())
transaction_status_code = STATUS_SUCCESS
transaction_status = 'Processed'
transaction_status_description = 'Processed'
if form.validate():
if api_type == TYPE_INITIATE:
# Process new payment
pass
elif api_type == TYPE_ACKNOWLEDGE:
if form.data.get('transaction_status_code') == STATUS_SUCCESS:
# Process successful acknowledgement
pass
else:
log.error('Invalid payment acknowledgement')
log.error(request.POST)
else:
log.error("Error while processing payment")
for error in form.all_errors():
log.error(error)
transaction_status_code = STATUS_INITIATE_FAILURE
transaction_status = 'Error'
transaction_status_description = 'Error while processing'
if api_type == TYPE_INITIATE:
data = request.POST
return dict(
api_key=LIPISHA_API_KEY,
api_signature=LIPISHA_API_SIGNATURE,
api_version=data.get('api_version'),
api_type=TYPE_RECEIPT,
transaction_reference=data.get('transaction_reference'),
transaction_status_code=transaction_status_code,
transaction_status=transaction_status,
transaction_status_description=transaction_status_description,
)
return {} |
<SYSTEM_TASK:>
Create a new database and collection by inserting one document.
<END_TASK>
<USER_TASK:>
Description:
def create_mongo_db(database_name, collection_name, initial_document):
"""Create a new database and collection by inserting one document.""" |
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url,document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
d = json.loads(initial_document, object_pairs_hook=OrderedDict)
collection.save(d)
except:
# error connecting to mongodb
response_dict['error'] = str(sys.exc_info())
return response_dict |
<SYSTEM_TASK:>
return a list of all dbs and related collections.
<END_TASK>
<USER_TASK:>
Description:
def show_dbs():
"""return a list of all dbs and related collections.
Return an empty list on error.
""" |
l = []
mc = client_connector()
if not mc:
# The client couldn't connect
return ()
dbs = mc.database_names()
for d in dbs:
dbc = mc[d]
collections = dbc.collection_names()
collections = remove_values_from_list(collections, "system.indexes")
l.append({"name": d, "collections": collections})
return tuple(l) |
<SYSTEM_TASK:>
releases the Publisher instance for reuse
<END_TASK>
<USER_TASK:>
Description:
def put(self, publisher):
""" releases the Publisher instance for reuse""" |
if publisher.name not in self.pools:
self.pools[publisher.name] = _Pool(logger=self.logger, name=publisher.name)
self.pools[publisher.name].put(publisher) |
<SYSTEM_TASK:>
iterates thru the list of established connections and resets them by disconnecting and reconnecting
<END_TASK>
<USER_TASK:>
Description:
def reset_all(self, suppress_logging=False):
""" iterates thru the list of established connections and resets them by disconnecting and reconnecting """ |
pool_names = list(self.pools)
for name in pool_names:
self.reset(name, suppress_logging) |
<SYSTEM_TASK:>
resets established connection by disconnecting and reconnecting
<END_TASK>
<USER_TASK:>
Description:
def reset(self, name, suppress_logging=False):
""" resets established connection by disconnecting and reconnecting """ |
self._close(name, suppress_logging)
self.get(name)
self.logger.info('Reset Flopsy Pool for {0}'.format(name)) |
<SYSTEM_TASK:>
closes one particular pool and all its amqp connections
<END_TASK>
<USER_TASK:>
Description:
def _close(self, name, suppress_logging):
""" closes one particular pool and all its amqp amqp connections """ |
try:
pool_names = list(self.pools)
if name in pool_names:
self.pools[name].close()
del self.pools[name]
except Exception as e:
self.logger.error('Exception on closing Flopsy Pool for {0}: {1}'.format(name, e),
exc_info=not suppress_logging) |
<SYSTEM_TASK:>
iterates thru all publisher pools and closes them
<END_TASK>
<USER_TASK:>
Description:
def close(self, suppress_logging=False):
""" iterates thru all publisher pools and closes them """ |
pool_names = list(self.pools)
for name in pool_names:
self._close(name, suppress_logging) |
<SYSTEM_TASK:>
reading box configuration entries for all boxes managed by Synergy Supervisor
<END_TASK>
<USER_TASK:>
Description:
def entries(self):
""" reading box configuration entries for all boxes managed by Synergy Supervisor """ |
list_of_rows = []
try:
list_of_rows = self.bc_dao.get_all()
except LookupError as e:
self.logger.error('MX Exception {0}'.format(e), exc_info=True)
return list_of_rows |
<SYSTEM_TASK:>
method finds Site Statistics record and updates its DB representation
<END_TASK>
<USER_TASK:>
Description:
def update(self, collection_name, instance):
""" method finds Site Statistics record and update it DB representation """ |
assert isinstance(instance, SiteStatistics)
if instance.db_id:
query = {'_id': ObjectId(instance.db_id)}
else:
query = {DOMAIN_NAME: instance.domain_name,
TIMEPERIOD: instance.timeperiod}
self.ds.update(collection_name, query, instance)
return instance.db_id |
<SYSTEM_TASK:>
inserts a Site Statistics record into MongoDB.
<END_TASK>
<USER_TASK:>
Description:
def insert(self, collection_name, instance):
""" inserts a unit of work into MongoDB. """ |
assert isinstance(instance, SiteStatistics)
collection = self.ds.connection(collection_name)
return collection.insert_one(instance.document).inserted_id |
<SYSTEM_TASK:>
method runs query on a specified collection and return a list of filtered Job records
<END_TASK>
<USER_TASK:>
Description:
def run_query(self, collection_name, query):
""" method runs query on a specified collection and return a list of filtered Job records """ |
cursor = self.ds.filter(collection_name, query)
return [Job.from_json(document) for document in cursor] |
<SYSTEM_TASK:>
method looks for suitable job records in all Job collections and returns them as a dict
<END_TASK>
<USER_TASK:>
Description:
def retrieve_records(self, timeperiod, include_running,
include_processed, include_noop, include_failed, include_disabled):
""" method looks for suitable job records in all Job collections and returns them as a dict""" |
resp = dict()
resp.update(self._search_by_level(COLLECTION_JOB_HOURLY, timeperiod, include_running,
include_processed, include_noop, include_failed, include_disabled))
resp.update(self._search_by_level(COLLECTION_JOB_DAILY, timeperiod, include_running,
include_processed, include_noop, include_failed, include_disabled))
timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_MONTHLY, timeperiod)
resp.update(self._search_by_level(COLLECTION_JOB_MONTHLY, timeperiod, include_running,
include_processed, include_noop, include_failed, include_disabled))
timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, timeperiod)
resp.update(self._search_by_level(COLLECTION_JOB_YEARLY, timeperiod, include_running,
include_processed, include_noop, include_failed, include_disabled))
return resp |
<SYSTEM_TASK:>
method looks for suitable UOW records and returns them as a dict
<END_TASK>
<USER_TASK:>
Description:
def retrieve_records(self, timeperiod, include_running,
include_processed, include_noop, include_failed, include_disabled):
""" method looks for suitable UOW records and returns them as a dict""" |
resp = dict()
try:
query = unit_of_work_dao.QUERY_GET_FREERUN_SINCE(timeperiod, include_running,
include_processed, include_noop, include_failed)
records_list = self.uow_dao.run_query(query)
if len(records_list) == 0:
self.logger.warning('MX: no Freerun UOW records found since {0}.'.format(timeperiod))
for uow_record in records_list:
# freerun uow.process_name is a composite in format <process_name::entry_name>
handler_key = split_schedulable_name(uow_record.process_name)
if handler_key not in self.freerun_handlers:
continue
thread_handler = self.freerun_handlers[handler_key]
if not include_disabled and not thread_handler.process_entry.is_on:
continue
resp[uow_record.key] = uow_record.document
except Exception as e:
self.logger.error('MX Dashboard FreerunStatements error: {0}'.format(e))
return resp |
<SYSTEM_TASK:>
Invokes the mod-host process.
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""
Invokes the mod-host process.
mod-host requires JACK to be running.
mod-host does not start JACK automatically, so you need to start it before running mod-host.
.. note::
This function is experimental. There is no guarantee that the process will actually be initiated.
""" |
if self.address != 'localhost':
raise ModHostError('The host configured in the constructor isn\'t "localhost". '
'It is not possible to start a process on another device.')
try:
subprocess.call([self.process, '-p', str(self.port)])
except FileNotFoundError as e:
exception = ModHostError(
'mod-host not found. Did you install it? '
'(https://github.com/moddevices/mod-host#building)'
)
raise exception from e
self._started_with_this_api = True |
<SYSTEM_TASK:>
Remove the audio plugins loaded and closes connection with mod-host.
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""
Remove the audio plugins loaded and closes connection with mod-host.
.. note::
If the mod-host process has been created with :meth:`~pluginsmanager.observer.mod_host.ModHost.start()`
method, it will be finished.
""" |
if self.host is None:
raise ModHostError('There is no established connection with mod-host. '
'Did you call the `connect()` method?')
super(ModHost, self).close()
if self._started_with_this_api:
self.host.quit()
else:
self.host.close() |
<SYSTEM_TASK:>
function reads managed_process and updates context entries appropriately
<END_TASK>
<USER_TASK:>
Description:
def synch_db():
""" function reads managed_process and updates context entries appropriately """ |
logger = get_logger(PROCESS_SCHEDULER)
managed_process_dao = ManagedProcessDao(logger)
try:
process_entries = managed_process_dao.get_all()
except LookupError:
logger.error('Synergy DB is not initialized. Aborting.')
exit(1)
for process_entry in process_entries:
process_name = process_entry.process_name
if process_name not in context.process_context:
logger.warning('Process {0} has no reflection in the context. Skipping it.'.format(process_name))
continue
if not isinstance(context.process_context[process_name], ManagedProcessEntry):
logger.error('Process entry {0} of non-managed type {1} found in managed_process table. Skipping it.'
.format(process_name, context.process_context[process_name].__class__.__name__))
continue
context.process_context[process_name] = process_entry
logger.info('Context updated with process entry {0}.'.format(process_entry.key)) |
<SYSTEM_TASK:>
writes to managed_process table records from the context.process_context
<END_TASK>
<USER_TASK:>
Description:
def update_db():
""" writes to managed_process table records from the context.process_context """ |
logger = get_logger(PROCESS_SCHEDULER)
managed_process_dao = ManagedProcessDao(logger)
managed_process_dao.clear()
for process_name, process_entry in context.process_context.items():
if not isinstance(process_entry, ManagedProcessEntry):
continue
managed_process_dao.update(process_entry)
logger.info('Updated DB with process entry {0} from the context.'.format(process_entry.key)) |
<SYSTEM_TASK:>
Moves an item of the list to a new position
<END_TASK>
<USER_TASK:>
Description:
def move(self, item, new_position):
"""
Moves an item of the list to a new position
Calls observer ``self.observer(UpdateType.DELETED, item, index)``
and observer ``self.observer(UpdateType.CREATED, item, index)``
if ``val != self[index]``
:param item: Item that will be moved to new_position
:param new_position: Item's new position
""" |
if item == self[new_position]:
return
self.remove(item)
self.insert(new_position, item) |
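The same remove-then-insert semantics on a plain Python list; the original class additionally notifies its observer with the DELETED/CREATED updates described in the docstring.

def move(lst, item, new_position):
    if item == lst[new_position]:
        return                      # already in place, nothing to do
    lst.remove(item)                # remove from the old position
    lst.insert(new_position, item)  # re-insert at the new position

items = ['a', 'b', 'c', 'd']
move(items, 'd', 0)
print(items)   # ['d', 'a', 'b', 'c']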
<SYSTEM_TASK:>
method performs logging into log file and Timetable's tree node
<END_TASK>
<USER_TASK:>
Description:
def _log_message(self, level, process_name, timeperiod, msg):
""" method performs logging into log file and Timetable's tree node""" |
self.timetable.add_log_entry(process_name, timeperiod, msg)
self.logger.log(level, msg) |
<SYSTEM_TASK:>
method is valid for processes having time_grouping != 1.
<END_TASK>
<USER_TASK:>
Description:
def _process_noop_timeperiod(self, job_record):
""" method is valid for processes having time_grouping != 1.
should a job record fall in-between grouped time milestones,
its state should be set to STATE_NOOP without any processing """ |
job_record.state = job.STATE_NOOP
self.job_dao.update(job_record)
time_grouping = context.process_context[job_record.process_name].time_grouping
msg = 'Job {0}@{1} with time_grouping {2} was transferred to STATE_NOOP' \
.format(job_record.process_name, job_record.timeperiod, time_grouping)
self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg) |
<SYSTEM_TASK:>
method logs a warning message notifying that the job is no longer governed by this state machine
<END_TASK>
<USER_TASK:>
Description:
def _process_terminal_state(self, job_record):
""" method logs a warning message notifying that the job is no longer govern by this state machine """ |
msg = 'Job {0} for {1}@{2} is in the terminal state {3}, ' \
'and is no further govern by the State Machine {4}' \
.format(job_record.db_id, job_record.process_name, job_record.timeperiod, job_record.state, self.name)
self._log_message(WARNING, job_record.process_name, job_record.timeperiod, msg) |
<SYSTEM_TASK:>
method updates job record with a new unit_of_work and new state
<END_TASK>
<USER_TASK:>
Description:
def update_job(self, job_record, uow, new_state):
""" method updates job record with a new unit_of_work and new state""" |
original_job_state = job_record.state
job_record.state = new_state
job_record.related_unit_of_work = uow.db_id
self.job_dao.update(job_record)
msg = 'Updated Job {0} for {1}@{2}: state transfer {3} -> {4};' \
.format(job_record.db_id, job_record.process_name, job_record.timeperiod, original_job_state, new_state)
self._log_message(INFO, job_record.process_name, job_record.timeperiod, msg) |
<SYSTEM_TASK:>
reads JSON request from the mq message and delivers it for processing
<END_TASK>
<USER_TASK:>
Description:
def _mq_callback(self, message):
""" reads JSON request from the mq message and delivers it for processing """ |
while threading.active_count() > settings.settings['bash_runnable_count'] + self.initial_thread_count:
time.sleep(0.1)
t = BashRunnable(self.logger, message, self.consumer, self.performance_tracker)
t.daemon = True
t.start() |
<SYSTEM_TASK:>
Saves the state if it has changed.
<END_TASK>
<USER_TASK:>
Description:
def save_loop(self):
"""
Saves the state if it has changed.
""" |
last_hash = hash(repr(self.hosts))
while self.running:
eventlet.sleep(self.save_interval)
next_hash = hash(repr(self.hosts))
if next_hash != last_hash:
self.save()
last_hash = next_hash |
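The change-detection idea in isolation: snapshot the state with hash(repr(...)) and only persist when the snapshot differs (a single iteration, without the eventlet sleep loop).

hosts = {'example.com': ['proxy', {}, True]}
last_hash = hash(repr(hosts))

hosts['example.org'] = ['proxy', {}, False]   # simulate a state change

next_hash = hash(repr(hosts))
if next_hash != last_hash:
    print('state changed -> save()')          # the real loop would call self.save() here
    last_hash = next_hash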
<SYSTEM_TASK:>
Accepts management requests.
<END_TASK>
<USER_TASK:>
Description:
def management_loop(self, address, family):
"""
Accepts management requests.
""" |
try:
sock = eventlet.listen(address, family)
except socket.error, e:
logging.critical("Cannot listen on (%s, %s): %s" % (address, family, e))
return
# Sleep to ensure we've dropped privileges by the time we start serving
eventlet.sleep(0.5)
# Actually serve management
logging.info("Listening for management on %s" % (address, ))
management_app = ManagementApp(self)
try:
with open("/dev/null", "w") as log_dest:
wsgi.server(
sock,
management_app.handle,
log = log_dest,
)
finally:
sock.close() |
<SYSTEM_TASK:>
Accepts incoming connections.
<END_TASK>
<USER_TASK:>
Description:
def listen_loop(self, address, family, internal=False):
"""
Accepts incoming connections.
""" |
try:
sock = eventlet.listen(address, family)
except socket.error, e:
if e.errno == errno.EADDRINUSE:
logging.critical("Cannot listen on (%s, %s): already in use" % (address, family))
raise
elif e.errno == errno.EACCES and address[1] <= 1024:
logging.critical("Cannot listen on (%s, %s) (you might need to launch as root)" % (address, family))
return
logging.critical("Cannot listen on (%s, %s): %s" % (address, family, e))
return
# Sleep to ensure we've dropped privileges by the time we start serving
eventlet.sleep(0.5)
# Start serving
logging.info("Listening for requests on %s" % (address, ))
try:
eventlet.serve(
sock,
lambda sock, addr: self.handle(sock, addr, internal),
concurrency = 10000,
)
finally:
sock.close() |
<SYSTEM_TASK:>
Handles an incoming HTTP connection.
<END_TASK>
<USER_TASK:>
Description:
def handle(self, sock, address, internal=False):
"""
Handles an incoming HTTP connection.
""" |
try:
sock = StatsSocket(sock)
rfile = sock.makefile('rb', 4096)
# Read the first line
first = rfile.readline().strip("\r\n")
words = first.split()
# Ensure it looks kind of like HTTP
if not (2 <= len(words) <= 3):
sock.sendall("HTTP/1.0 400 Bad Request\r\nConnection: close\r\nContent-length: 0\r\n\r\n")
return
path = words[1]
# Read the headers
headers = mimetools.Message(rfile, 0)
# Work out the host
try:
host = headers['Host']
except KeyError:
host = "unknown"
headers['Connection'] = "close"
if not internal:
headers['X-Forwarded-For'] = address[0]
headers['X-Forwarded-Protocol'] = ""
headers['X-Forwarded-Proto'] = ""
# Make sure they're not using odd encodings
if "Transfer-Encoding" in headers:
sock.sendall("HTTP/1.0 411 Length Required\r\nConnection: close\r\nContent-length: 0\r\n\r\n")
return
# Match the host to an action
protocol = "http"
if headers.get('X-Forwarded-Protocol', headers.get('X-Forwarded-Proto', "")).lower() in ("ssl", "https"):
protocol = "https"
action = self.resolve_host(host, protocol)
# Record us as an open connection
stats_dict = self.stats.setdefault(action.matched_host, {})
stats_dict['open_requests'] = stats_dict.get('open_requests', 0) + 1
# Run the action
try:
rfile._rbuf.seek(0)
action.handle(
sock = sock,
read_data = first + "\r\n" + str(headers) + "\r\n" + rfile._rbuf.read(),
path = path,
headers = headers,
)
finally:
stats_dict['open_requests'] -= 1
stats_dict['completed_requests'] = stats_dict.get('completed_requests', 0) + 1
stats_dict['bytes_sent'] = stats_dict.get('bytes_sent', 0) + sock.bytes_sent
stats_dict['bytes_received'] = stats_dict.get('bytes_received', 0) + sock.bytes_received
except socket.error, e:
if e.errno not in (errno.EPIPE, errno.ETIMEDOUT, errno.ECONNRESET):
logging.error(traceback.format_exc())
except:
logging.error(traceback.format_exc())
try:
sock.sendall("HTTP/1.0 500 Internal Server Error\r\n\r\nThere has been an internal error in the load balancer.")
except socket.error, e:
if e.errno != errno.EPIPE:
raise
finally:
try:
sock.close()
rfile.close()
except:
logging.error(traceback.format_exc()) |
<SYSTEM_TASK:>
loads scheduler managed entries. no start-up procedures are performed
<END_TASK>
<USER_TASK:>
Description:
def _load_managed_entries(self):
""" loads scheduler managed entries. no start-up procedures are performed """ |
for process_name, process_entry in context.process_context.items():
if isinstance(process_entry, ManagedProcessEntry):
function = self.fire_managed_worker
else:
self.logger.warning('Skipping non-managed context entry {0} of type {1}.'
.format(process_name, process_entry.__class__.__name__))
continue
try:
self._register_process_entry(process_entry, function)
except Exception:
self.logger.error('Managed Thread Handler {0} failed to start. Skipping it.'
.format(process_entry.key), exc_info=True) |
<SYSTEM_TASK:>
reads scheduler freerun entries and starts their timers to trigger events
<END_TASK>
<USER_TASK:>
Description:
def _load_freerun_entries(self):
""" reads scheduler managed entries and starts their timers to trigger events """ |
freerun_entries = self.freerun_process_dao.get_all()
for freerun_entry in freerun_entries:
try:
self._register_process_entry(freerun_entry, self.fire_freerun_worker)
except Exception:
self.logger.error('Freerun Thread Handler {0} failed to start. Skipping it.'
.format(freerun_entry.key), exc_info=True) |
<SYSTEM_TASK:>
reads managed process entries and starts timer instances; starts dependant threads
<END_TASK>
<USER_TASK:>
Description:
def start(self, *_):
""" reads managed process entries and starts timer instances; starts dependant threads """ |
self.logger.info('Starting Scheduler...')
db_manager.synch_db()
self._load_managed_entries()
try:
self._load_freerun_entries()
except LookupError as e:
self.logger.warning('DB Lookup: {0}'.format(e))
# Scheduler is initialized and running. GarbageCollector can be safely started
self.gc.start()
# Job/UOW Status Listeners can be safely started
self.uow_listener.start()
self.job_listener.start()
self.logger.info('Startup Sequence Completed. Starting MX.')
# Management Extension (MX) should be the last to start
self.mx.start() |
<SYSTEM_TASK:>
requests next valid job for given process and manages its state
<END_TASK>
<USER_TASK:>
Description:
def fire_managed_worker(self, thread_handler_header):
""" requests next valid job for given process and manages its state """ |
def _fire_worker(process_entry, prev_job_record):
assert isinstance(process_entry, ManagedProcessEntry)
job_record = self.timetable.get_next_job_record(process_entry.process_name)
state_machine = self.timetable.state_machines[process_entry.state_machine_name]
if job_record == prev_job_record:
# avoid the loop
return None
if not state_machine.run_on_active_timeperiod:
time_qualifier = process_entry.time_qualifier
incremented_timeperiod = time_helper.increment_timeperiod(time_qualifier, job_record.timeperiod)
dt_record_timestamp = time_helper.synergy_to_datetime(time_qualifier, incremented_timeperiod)
dt_record_timestamp += timedelta(minutes=LAG_5_MINUTES)
if datetime.utcnow() <= dt_record_timestamp:
self.logger.info('Job {0} for {1}@{2} will not be triggered until {3}.'
.format(job_record.db_id,
job_record.process_name,
job_record.timeperiod,
dt_record_timestamp.strftime('%Y-%m-%d %H:%M:%S')))
return None
blocking_type = process_entry.blocking_type
if blocking_type == BLOCKING_DEPENDENCIES:
state_machine.manage_job_with_blocking_dependencies(job_record)
elif blocking_type == BLOCKING_CHILDREN:
state_machine.manage_job_with_blocking_children(job_record)
elif blocking_type == BLOCKING_NORMAL:
state_machine.manage_job(job_record)
else:
raise ValueError('Unknown managed process type {0}'.format(blocking_type))
return job_record
try:
assert isinstance(thread_handler_header, ThreadHandlerHeader)
self.logger.info('{0} {{'.format(thread_handler_header.key))
job_record = _fire_worker(thread_handler_header.process_entry, None)
while job_record and job_record.is_finished:
job_record = _fire_worker(thread_handler_header.process_entry, job_record)
except Exception as e:
self.logger.error('Exception: {0}'.format(e), exc_info=True)
finally:
self.logger.info('}') |
<SYSTEM_TASK:>
fires free-run worker with no dependencies to track
<END_TASK>
<USER_TASK:>
Description:
def fire_freerun_worker(self, thread_handler_header):
""" fires free-run worker with no dependencies to track """ |
try:
assert isinstance(thread_handler_header, ThreadHandlerHeader)
self.logger.info('{0} {{'.format(thread_handler_header.key))
state_machine = self.timetable.state_machines[STATE_MACHINE_FREERUN]
state_machine.manage_schedulable(thread_handler_header.process_entry)
except Exception as e:
self.logger.error('fire_freerun_worker: {0}'.format(e))
finally:
self.logger.info('}') |
<SYSTEM_TASK:>
The id of the sensor of this data point.
<END_TASK>
<USER_TASK:>
Description:
def sensor_id(self):
"""The id of the sensor of this data point.
Returns:
The id of the sensor that generated this datapoint. Will
throw an AttributeError if no sensor id was found in the
underlying data.
""" |
if hasattr(self, '_sensor_id'):
return self._sensor_id
relationships = self._json_data.get('relationships')
sensor_id = relationships.get('sensor').get('data').get('id')
self._sensor_id = sensor_id
return sensor_id |
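The lazy-caching pattern above in standalone form: compute the id once from nested JSON-like data, memoize it on the instance, and return the cached value on later accesses (DataPoint here is a simplified stand-in for the real resource class).

class DataPoint:
    def __init__(self, json_data):
        self._json_data = json_data

    @property
    def sensor_id(self):
        if hasattr(self, '_sensor_id'):
            return self._sensor_id          # cached on a previous access
        relationships = self._json_data.get('relationships')
        self._sensor_id = relationships.get('sensor').get('data').get('id')
        return self._sensor_id

point = DataPoint({'relationships': {'sensor': {'data': {'id': 'abc-123'}}}})
print(point.sensor_id)   # 'abc-123'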
<SYSTEM_TASK:>
Post a new reading to a timeseries.
<END_TASK>
<USER_TASK:>
Description:
def create(self, port, value, timestamp=None):
"""Post a new reading to a timeseries.
A reading is comprised of a `port`, a `value` and a timestamp.
A port is like a tag for the given reading and gives an
indication of the meaning of the value.
The value of the reading can be any valid json value.
The timestamp is considered the time the reading was taken, as
opposed to the `created` time of the data-point which
represents when the data-point was stored in the Helium
API. If the timestamp is not given the server will construct a
timestamp upon receiving the new reading.
Args:
port(string): The port to use for the new data-point
value: The value for the new data-point
Keyword Args:
timestamp(:class:`datetime`): An optional :class:`datetime` object
""" |
session = self._session
datapoint_class = self._datapoint_class
attributes = {
'port': port,
'value': value,
}
if timestamp is not None:
attributes['timestamp'] = to_iso_date(timestamp)
attributes = build_request_body('data-point', None,
attributes=attributes)
def _process(json):
data = json.get('data')
return datapoint_class(data, session)
return session.post(self._base_url, CB.json(201, _process),
json=attributes) |
<SYSTEM_TASK:>
Get a live stream of timeseries readings.
<END_TASK>
<USER_TASK:>
Description:
def live(self):
"""Get a live stream of timeseries readings.
This returns an Iterable over a live stream of readings. Note
that the result will need to be closed since the system can
not tell when you'll be done with it.
You can either call ``close`` on the endpoint when you're done or
use the context management facilities of the endpoint.
.. code-block:: python
# Fetch a sensor
timeseries = sensor.timeseries()
# ensure live endpoint closed
with timeseries.live() as live:
# Wait for 10 readings
first10 = list(islice(live, 10))
Returns:
""" |
session = self._session
url = "{}/live".format(self._base_url)
supported_params = frozenset(['filter[port]'])
params = {k: v for k, v in iteritems(self._params)
if k in supported_params}
return session.live(url, self._datapoint_class, {
'is_aggregate': self._is_aggregate
}, params=params) |
<SYSTEM_TASK:>
register dependencies between trees
<END_TASK>
<USER_TASK:>
Description:
def _register_dependencies(self):
""" register dependencies between trees""" |
for tree_name, context_entry in context.timetable_context.items():
tree = self.trees[tree_name]
assert isinstance(tree, MultiLevelTree)
for dependent_on in context_entry.dependent_on:
dependent_on_tree = self.trees[dependent_on]
assert isinstance(dependent_on_tree, MultiLevelTree)
tree.register_dependent_on(dependent_on_tree) |
<SYSTEM_TASK:>
returns list of trees that are dependent_on given tree_obj
<END_TASK>
<USER_TASK:>
Description:
def _find_dependant_trees(self, tree_obj):
""" returns list of trees that are dependent_on given tree_obj """ |
dependant_trees = []
for tree_name, tree in self.trees.items():
if tree_obj in tree.dependent_on:
dependant_trees.append(tree)
return dependant_trees |
<SYSTEM_TASK:>
method reprocesses the node and all its dependants and parent nodes
<END_TASK>
<USER_TASK:>
Description:
def reprocess_tree_node(self, tree_node, tx_context=None):
""" method reprocesses the node and all its dependants and parent nodes """ |
if not tx_context:
# create transaction context if one was not provided
# format: {process_name: {timeperiod: AbstractTreeNode} }
tx_context = collections.defaultdict(dict)
if tree_node.parent is None:
# do not process 'root' - the only node that has None as 'parent'
return tx_context
if tree_node.timeperiod in tx_context[tree_node.process_name]:
# the node has already been marked for re-processing
return tx_context
if tree_node.job_record.is_embryo:
# the node does not require re-processing
pass
else:
state_machine_name = context.process_context[tree_node.process_name].state_machine_name
state_machine = self.state_machines[state_machine_name]
state_machine.reprocess_job(tree_node.job_record)
tx_context[tree_node.process_name][tree_node.timeperiod] = tree_node
self.reprocess_tree_node(tree_node.parent, tx_context)
dependant_nodes = self._find_dependant_tree_nodes(tree_node)
for node in dependant_nodes:
self.reprocess_tree_node(node, tx_context)
return tx_context |
<SYSTEM_TASK:>
method skips the node and all its dependants and child nodes
<END_TASK>
<USER_TASK:>
Description:
def skip_tree_node(self, tree_node, tx_context=None):
""" method skips the node and all its dependants and child nodes """ |
if not tx_context:
# create transaction context if one was not provided
# format: {process_name: {timeperiod: AbstractTreeNode} }
tx_context = collections.defaultdict(dict)
if tree_node.timeperiod in tx_context[tree_node.process_name]:
# the node has already been marked for skipping
return tx_context
if tree_node.job_record.is_finished:
# the node is finished and does not require skipping
pass
else:
state_machine_name = context.process_context[tree_node.process_name].state_machine_name
state_machine = self.state_machines[state_machine_name]
state_machine.skip_job(tree_node.job_record)
tx_context[tree_node.process_name][tree_node.timeperiod] = tree_node
for timeperiod, node in tree_node.children.items():
self.skip_tree_node(node, tx_context)
dependant_nodes = self._find_dependant_tree_nodes(tree_node)
for node in dependant_nodes:
self.skip_tree_node(node, tx_context)
return tx_context |
<SYSTEM_TASK:>
- looks for an existing job record in the DB, and if not found
<END_TASK>
<USER_TASK:>
Description:
def assign_job_record(self, tree_node):
""" - looks for an existing job record in the DB, and if not found
- creates a job record in STATE_EMBRYO and bind it to the given tree node """ |
try:
job_record = self.job_dao.get_one(tree_node.process_name, tree_node.timeperiod)
except LookupError:
state_machine_name = context.process_context[tree_node.process_name].state_machine_name
state_machine = self.state_machines[state_machine_name]
job_record = state_machine.create_job(tree_node.process_name, tree_node.timeperiod)
tree_node.job_record = job_record |
<SYSTEM_TASK:>
return tree that is managing time-periods for given process
<END_TASK>
<USER_TASK:>
Description:
def get_tree(self, process_name):
""" return tree that is managing time-periods for given process""" |
for tree_name, tree in self.trees.items():
if process_name in tree:
return tree |
<SYSTEM_TASK:>
method iterates through all documents in all job collections and builds a tree of known system state
<END_TASK>
<USER_TASK:>
Description:
def _build_tree_by_level(self, time_qualifier, collection_name, since):
""" method iterated thru all documents in all job collections and builds a tree of known system state""" |
invalid_tree_records = dict()
invalid_tq_records = dict()
try:
job_records = self.job_dao.get_all(collection_name, since)
for job_record in job_records:
tree = self.get_tree(job_record.process_name)
if tree is None:
utils.increment_family_property(job_record.process_name, invalid_tree_records)
continue
job_time_qualifier = context.process_context[job_record.process_name].time_qualifier
if time_qualifier != job_time_qualifier:
utils.increment_family_property(job_record.process_name, invalid_tq_records)
continue
tree.update_node(job_record)
except LookupError:
self.logger.warning('No job records in {0}.'.format(collection_name))
for name, counter in invalid_tree_records.items():
self.logger.warning('Skipping {0} job records for {1} since no tree is handling it.'
.format(counter, name))
for name, counter in invalid_tq_records.items():
self.logger.warning('Skipping {0} job records for {1} since the process has different time qualifier.'
.format(counter, name)) |
<SYSTEM_TASK:>
method iterates thru all objects older than synergy_start_timeperiod parameter in job collections
<END_TASK>
<USER_TASK:>
Description:
def load_tree(self):
""" method iterates thru all objects older than synergy_start_timeperiod parameter in job collections
and loads them into this timetable""" |
timeperiod = settings.settings['synergy_start_timeperiod']
yearly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_YEARLY, timeperiod)
monthly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_MONTHLY, timeperiod)
daily_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_DAILY, timeperiod)
hourly_timeperiod = time_helper.cast_to_time_qualifier(QUALIFIER_HOURLY, timeperiod)
self._build_tree_by_level(QUALIFIER_HOURLY, COLLECTION_JOB_HOURLY, since=hourly_timeperiod)
self._build_tree_by_level(QUALIFIER_DAILY, COLLECTION_JOB_DAILY, since=daily_timeperiod)
self._build_tree_by_level(QUALIFIER_MONTHLY, COLLECTION_JOB_MONTHLY, since=monthly_timeperiod)
self._build_tree_by_level(QUALIFIER_YEARLY, COLLECTION_JOB_YEARLY, since=yearly_timeperiod) |
<SYSTEM_TASK:>
method is called from abstract_state_machine.manage_job to notify about job's failed processing
<END_TASK>
<USER_TASK:>
Description:
def skip_if_needed(self, job_record):
""" method is called from abstract_state_machine.manage_job to notify about job's failed processing
if should_skip_node returns True - the node's job_record is transferred to STATE_SKIPPED """ |
tree = self.get_tree(job_record.process_name)
node = tree.get_node(job_record.process_name, job_record.timeperiod)
if tree.should_skip_tree_node(node):
self.skip_tree_node(node) |
<SYSTEM_TASK:>
adds a non-persistent log entry to the tree node
<END_TASK>
<USER_TASK:>
Description:
def add_log_entry(self, process_name, timeperiod, msg):
""" adds a non-persistent log entry to the tree node """ |
tree = self.get_tree(process_name)
node = tree.get_node(process_name, timeperiod)
node.add_log_entry([datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'), msg]) |
<SYSTEM_TASK:>
Save all data from a banks_manager
<END_TASK>
<USER_TASK:>
Description:
def save(self, banks_manager):
"""
Save all data from a banks_manager
:param BanksManager banks_manager: BanksManager whose banks data will be persisted
""" |
self.banks_files.delete_all_banks()
self.banks_files.save(banks_manager)
self.index_file.save(banks_manager) |
<SYSTEM_TASK:>
Applies kwargs arguments to the instance passed as the first
<END_TASK>
<USER_TASK:>
Description:
def apply(cls, self, *args, **kwargs):
"""
Applies kwargs arguments to the instance passed as the first
argument to the call.
For defined INPUTS, OUTPUTS and PARAMETERS the method extracts
a corresponding value from kwargs and sets it as an instance attribute.
For example, if the processor has a 'foo' parameter declared and
'foo = something' is passed to apply(), self.foo will become
'something'.
""" |
for key in kwargs:
if key in [ x.name for x in cls.INPUTS ]:
setattr(self, key, kwargs[key])
if key in [ x.name for x in cls.OUTPUTS ]:
setattr(self, key, kwargs[key])
if key in [ x.name for x in cls.PARAMETERS ]:
setattr(self, key, kwargs[key]) |
<SYSTEM_TASK:>
Generate spec for the processor as a Python dictionary.
<END_TASK>
<USER_TASK:>
Description:
def spec(self):
"""
Generate spec for the processor as a Python dictionary.
A spec is a standard way to describe a MountainLab processor in a way
that is easy to process, yet still understandable by humans.
This method generates a Python dictionary that complies with a spec
definition.
""" |
pspec = {}
pspec['name'] = self.NAME
pspec['version'] = self.VERSION
pspec['description'] = self.DESCRIPTION
#if hasattr(self, 'run') and callable(self.run):
components = [sys.argv[0], self.NAME]
if self.USE_ARGUMENTS: components.append('$(arguments)')
pspec['exe_command'] = self.COMMAND or ' '.join(components)
pspec['inputs'] = [ inp.spec for inp in self.INPUTS ]
pspec['outputs'] = [ out.spec for out in self.OUTPUTS ]
pspec['parameters'] = [ param.spec for param in self.PARAMETERS ]
if hasattr(self, 'test') and callable(self.test):
pspec['has_test'] = True
return pspec |
<SYSTEM_TASK:>
Executes the processor passing given arguments.
<END_TASK>
<USER_TASK:>
Description:
def invoke(proc, args=None, *, _instance = None, **kwargs):
"""
Executes the processor passing given arguments.
:param args: a list of parameters in --key=value format.
""" |
if args is None:
args=[]
for kwargname in kwargs:
args.append('--'+kwargname)
args.append('{}'.format(kwargs[kwargname]))
parser = proc.invoke_parser(noexit=(_instance is not None))
opts = parser.parse_args(args)
kwargs0 = {}
def handle_set(opts, dataset, kwargs0, canMulti = False):
for elem in dataset:
elemname = elem.name
# ml-run-process passes values for not provided inputs, outputs and params as empty strings ('')
if hasattr(opts, elemname) and getattr(opts, elemname) not in [None, '']:
# value for element was given in the invocation
elemvalue = getattr(opts, elemname)
if canMulti and isinstance(elemvalue, list):
elemlist = elemvalue
else:
elemlist = [ elemvalue ]
for elemelem in elemlist:
for validator in elem.validators: validator(elemelem)
if hasattr(opts, elem.name):
prepared = elem.prepare(elemvalue) or elemvalue
kwargs0[elem.name] = prepared
elif elem.optional:
# value was not set but is optional so ignore it
kwargs0[elem.name] = None
else:
# value was not set and is mandatory -- error
raise AttributeError('Missing value for {} '.format(elemname))
try:
handle_set(opts, proc.INPUTS, kwargs0, True)
handle_set(opts, proc.OUTPUTS, kwargs0, True)
for param in proc.PARAMETERS:
if hasattr(opts, param.name) and getattr(opts, param.name) is not None and getattr(opts, param.name) != '':
value = getattr(opts, param.name)
# validate if needed
for validator in param.validators:
validator(value)
# if param is a tuple of choices, each choice is a tuple itself
# with first element of the input value and second element
# containing the value to be passed to the processor
if param.choices and isinstance(param.choices, tuple):
for choice in param.choices:
if choice[0] == value:
kwargs0[param.name] = choice[1]
break
else:
kwargs0[param.name] = value
elif param.optional:
kwargs0[param.name] = param.default
else:
raise AttributeError('Missing value for {} parameter'.format(param.name))
if not _instance:
_instance = proc(**kwargs0)
else:
_instance.apply(_instance, **kwargs0)
return _instance.run()
# todo: cleanup
except Exception as e:
print("Error:", e)
# traceback.print_exc()
raise |
<SYSTEM_TASK:>
Returns the first occurrence of the pedalboard in your bank
<END_TASK>
<USER_TASK:>
Description:
def index(self):
"""
Returns the first occurrence of the pedalboard in your bank
""" |
if self.bank is None:
raise IndexError('Pedalboard does not contain a bank')
return self.bank.pedalboards.index(self) |
<SYSTEM_TASK:>
check for process' pid file and returns pid from there
<END_TASK>
<USER_TASK:>
Description:
def get_process_pid(process_name):
""" check for process' pid file and returns pid from there """ |
try:
pid_filename = get_pid_filename(process_name)
with open(pid_filename, mode='r') as pid_file:
pid = int(pid_file.read().strip())
except IOError:
pid = None
return pid |
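A standalone sketch of the pid-file round trip; a temporary path stands in for the project's get_pid_filename helper.

import os
import tempfile

pid_filename = os.path.join(tempfile.mkdtemp(), 'worker.pid')
with open(pid_filename, mode='w') as pid_file:
    pid_file.write(str(os.getpid()))          # write this process's pid, as a daemon would

try:
    with open(pid_filename, mode='r') as pid_file:
        pid = int(pid_file.read().strip())
except IOError:
    pid = None
print(pid)   # pid of the current process, or None if the file was unreadable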
<SYSTEM_TASK:>
stops the timer. call_back function is not called
<END_TASK>
<USER_TASK:>
Description:
def cancel(self):
""" stops the timer. call_back function is not called """ |
self.event.clear()
if self.__timer is not None:
self.__timer.cancel() |
<SYSTEM_TASK:>
calls the call_back function. interrupts the timer to start a new countdown
<END_TASK>
<USER_TASK:>
Description:
def trigger(self):
""" calls the call_back function. interrupts the timer to start a new countdown """ |
self.call_back(*self.args, **self.kwargs)
if self.__timer is not None:
self.__timer.cancel() |
<SYSTEM_TASK:>
method casts given timeperiod accordingly to time qualifier.
<END_TASK>
<USER_TASK:>
Description:
def cast_to_time_qualifier(time_qualifier, timeperiod):
""" method casts given timeperiod accordingly to time qualifier.
For example, will cast session time format of 20100101193412 to 2010010119 with QUALIFIER_HOURLY """ |
if time_qualifier == QUALIFIER_HOURLY:
date_format = SYNERGY_HOURLY_PATTERN
elif time_qualifier == QUALIFIER_DAILY:
date_format = SYNERGY_DAILY_PATTERN
elif time_qualifier == QUALIFIER_MONTHLY:
date_format = SYNERGY_MONTHLY_PATTERN
elif time_qualifier == QUALIFIER_YEARLY:
date_format = SYNERGY_YEARLY_PATTERN
else:
raise ValueError('unknown time qualifier: {0}'.format(time_qualifier))
pattern = define_pattern(timeperiod)
t = datetime.strptime(timeperiod, pattern)
return t.strftime(date_format) |
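Worked sketch of the cast described in the docstring; the pattern constants below are assumptions standing in for the real SYNERGY_* formats.

from datetime import datetime

SESSION_PATTERN = '%Y%m%d%H%M%S'   # assumed format of 20100101193412
HOURLY_PATTERN = '%Y%m%d%H'        # assumed format of 2010010119

timeperiod = '20100101193412'
t = datetime.strptime(timeperiod, SESSION_PATTERN)
print(t.strftime(HOURLY_PATTERN))  # -> 2010010119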
<SYSTEM_TASK:>
method parses datetime and returns Synergy Date
<END_TASK>
<USER_TASK:>
Description:
def datetime_to_synergy(time_qualifier, dt):
""" method parses datetime and returns Synergy Date""" |
if time_qualifier == QUALIFIER_HOURLY:
date_format = SYNERGY_HOURLY_PATTERN
elif time_qualifier == QUALIFIER_DAILY:
date_format = SYNERGY_DAILY_PATTERN
elif time_qualifier == QUALIFIER_MONTHLY:
date_format = SYNERGY_MONTHLY_PATTERN
elif time_qualifier == QUALIFIER_YEARLY:
date_format = SYNERGY_YEARLY_PATTERN
elif time_qualifier == QUALIFIER_REAL_TIME:
date_format = SYNERGY_SESSION_PATTERN
else:
raise ValueError('unknown time qualifier: {0}'.format(time_qualifier))
return dt.strftime(date_format) |
<SYSTEM_TASK:>
method receives timeperiod in Synergy format YYYYMMDDHH and converts it to UTC _naive_ datetime
<END_TASK>
<USER_TASK:>
Description:
def synergy_to_datetime(time_qualifier, timeperiod):
""" method receives timeperiod in Synergy format YYYYMMDDHH and convert it to UTC _naive_ datetime""" |
if time_qualifier == QUALIFIER_HOURLY:
date_format = SYNERGY_HOURLY_PATTERN
elif time_qualifier == QUALIFIER_DAILY:
date_format = SYNERGY_DAILY_PATTERN
elif time_qualifier == QUALIFIER_MONTHLY:
date_format = SYNERGY_MONTHLY_PATTERN
elif time_qualifier == QUALIFIER_YEARLY:
date_format = SYNERGY_YEARLY_PATTERN
elif time_qualifier == QUALIFIER_REAL_TIME:
date_format = SYNERGY_SESSION_PATTERN
else:
raise ValueError('unknown time qualifier: {0}'.format(time_qualifier))
return datetime.strptime(timeperiod, date_format).replace(tzinfo=None) |
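A round-trip sketch combining this helper with datetime_to_synergy above; it assumes SYNERGY_HOURLY_PATTERN is effectively '%Y%m%d%H', which the earlier docstring example suggests:
from datetime import datetime

dt = synergy_to_datetime(QUALIFIER_HOURLY, '2010010119')
assert dt == datetime(2010, 1, 1, 19)
assert datetime_to_synergy(QUALIFIER_HOURLY, dt) == '2010010119'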
<SYSTEM_TASK:>
converts Synergy Timestamp for session to UTC zone seconds since epoch
<END_TASK>
<USER_TASK:>
Description:
def session_to_epoch(timestamp):
""" converts Synergy Timestamp for session to UTC zone seconds since epoch """ |
utc_timetuple = datetime.strptime(timestamp, SYNERGY_SESSION_PATTERN).replace(tzinfo=None).utctimetuple()
return calendar.timegm(utc_timetuple) |
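A hedged example, assuming SYNERGY_SESSION_PATTERN describes a 14-digit YYYYMMDDHHMMSS timestamp:
seconds = session_to_epoch('20100101193412')
print(seconds)  # -> 1262374452, i.e. 2010-01-01 19:34:12 UTC as seconds since epoch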
<SYSTEM_TASK:>
Add an LV2 plugin encapsulated as a jack client
<END_TASK>
<USER_TASK:>
Description:
def add(self, effect):
"""
Add an LV2 plugin encapsulated as a jack client
        :param Lv2Effect effect: Effect that will be loaded as an encapsulated LV2 plugin
""" |
effect.instance = self.instance_index
self.instance_index += 1
self.connection.send(ProtocolParser.add(effect)) |
<SYSTEM_TASK:>
Quit the connection with mod-host
<END_TASK>
<USER_TASK:>
Description:
def close(self):
"""
Quit the connection with mod-host
""" |
if self.connection is not None:
self.connection.close()
if self.connection_fd is not None:
self.connection_fd.close() |
<SYSTEM_TASK:>
method reads collection and refines the slice upper bound for processing
<END_TASK>
<USER_TASK:>
Description:
def update_scope_of_processing(self, process_name, uow, start_timeperiod, end_timeperiod):
"""method reads collection and refine slice upper bound for processing""" |
source_collection_name = uow.source
last_object_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
uow.end_id = str(last_object_id)
self.uow_dao.update(uow)
msg = 'Updated processing range for {0}@{1} for collection {2}: [{3} : {4}]' \
.format(process_name, start_timeperiod, source_collection_name, uow.start_id, uow.end_id)
self._log_message(INFO, process_name, start_timeperiod, msg) |
<SYSTEM_TASK:>
method computes new unit_of_work and transfers the job to STATE_FINAL_RUN
<END_TASK>
<USER_TASK:>
Description:
def _compute_and_transfer_to_final_run(self, process_name, start_timeperiod, end_timeperiod, job_record):
""" method computes new unit_of_work and transfers the job to STATE_FINAL_RUN
it also shares _fuzzy_ DuplicateKeyError logic from _compute_and_transfer_to_progress method""" |
source_collection_name = context.process_context[process_name].source
start_id = self.ds.highest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
end_id = self.ds.lowest_primary_key(source_collection_name, start_timeperiod, end_timeperiod)
uow, transfer_to_final = self.insert_and_publish_uow(job_record, start_id, end_id)
self.update_job(job_record, uow, job.STATE_FINAL_RUN)
if transfer_to_final:
self._process_state_final_run(job_record) |
<SYSTEM_TASK:>
method translates given timeperiod to the grouped timeperiod
<END_TASK>
<USER_TASK:>
Description:
def _translate_timeperiod(self, timeperiod):
""" method translates given timeperiod to the grouped timeperiod """ |
if self.time_grouping == 1:
# no translation is performed for identity grouping
return timeperiod
# step 1: tokenize timeperiod into: (year, month, day, hour)
# for instance: daily 2015031400 -> ('2015', '03', '14', '00')
year, month, day, hour = time_helper.tokenize_timeperiod(timeperiod)
# step 2: perform grouping on the stem
# ex1: stem of 14 with grouping 20 -> 20
# ex2: stem of 21 with grouping 20 -> 23
if self.time_qualifier == QUALIFIER_HOURLY:
stem = self._do_stem_grouping(timeperiod, int(hour))
result = '{0}{1}{2}{3:02d}'.format(year, month, day, stem)
elif self.time_qualifier == QUALIFIER_DAILY:
stem = self._do_stem_grouping(timeperiod, int(day))
result = '{0}{1}{2:02d}{3}'.format(year, month, stem, hour)
else: # self.time_qualifier == QUALIFIER_MONTHLY:
stem = self._do_stem_grouping(timeperiod, int(month))
result = '{0}{1:02d}{2}{3}'.format(year, stem, day, hour)
return result |
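A worked sketch of the daily branch, reusing the values from the in-code comments; the stem value is hypothetical and stands in for whatever self._do_stem_grouping returns with time_grouping=20:
year, month, day, hour = ('2015', '03', '14', '00')   # tokenize_timeperiod('2015031400')
stem = 20                                              # hypothetical grouping of day 14 into the 20-bucket
print('{0}{1}{2:02d}{3}'.format(year, month, stem, hour))  # -> 2015032000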
<SYSTEM_TASK:>
return a response_dict with a list of search results in descending
<END_TASK>
<USER_TASK:>
Description:
def query_mongo_sort_decend(
database_name,
collection_name,
query={},
skip=0,
limit=getattr(
settings,
'MONGO_LIMIT',
200),
return_keys=(),
sortkey=None):
"""return a response_dict with a list of search results in decending
order based on a sort key
""" |
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url,document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
if return_keys:
return_dict = {}
for k in return_keys:
return_dict[k] = 1
# print "returndict=",return_dict
mysearchresult = collection.find(
query, return_dict).skip(skip).limit(limit).sort(
sortkey, DESCENDING)
else:
mysearchresult = collection.find(query).skip(
skip).limit(limit).sort(sortkey, DESCENDING)
# response_dict['num_results']=int(mysearchresult.count(with_limit_and_skip=False))
response_dict['code'] = 200
response_dict['type'] = "search-results"
for d in mysearchresult:
d['id'] = d['_id'].__str__()
del d['_id']
l.append(d)
response_dict['results'] = l
except:
print("Error reading from Mongo")
print(str(sys.exc_info()))
response_dict['num_results'] = 0
response_dict['code'] = 500
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict |
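A hypothetical call; the database, collection, and field names below are placeholders, and MONGODB_CLIENT/MONGO_LIMIT are read from Django settings as the helper expects:
response = query_mongo_sort_decend(
    'exampledb', 'events',
    query={'status': 'active'},
    limit=10,
    return_keys=('status', 'created_at'),
    sortkey='created_at')
if response['code'] == 200:
    for doc in response['results']:
        print(doc['id'], doc.get('created_at'))
else:
    print(response['message'])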
<SYSTEM_TASK:>
delete from mongo helper
<END_TASK>
<USER_TASK:>
Description:
def delete_mongo(database_name, collection_name,
query={}, just_one=False):
"""delete from mongo helper""" |
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
mysearchresult = collection.remove(query, just_one)
response_dict['code'] = 200
response_dict['type'] = "remove-confirmation"
except:
# print "Error reading from Mongo"
# print str(sys.exc_info())
response_dict['num_results'] = 0
response_dict['code'] = 500
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict |
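A hypothetical call that removes a single matching document; the names are placeholders:
result = delete_mongo('exampledb', 'widgets', query={'name': 'obsolete'}, just_one=True)
print(result['code'])  # 200 on success, 500 on error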
<SYSTEM_TASK:>
Write a document to the collection. Return a response_dict containing
<END_TASK>
<USER_TASK:>
Description:
def write_mongo(document, database_name,
collection_name, update=False):
"""Write a document to the collection. Return a response_dict containing
    the written record. Functions as either an insert or an update, depending on the
    update parameter""" |
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url,document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
# Cast the query to integers
# if settings.CAST_ININGS_TO_INTEGERS:
# query = cast_number_strings_to_integers(query)
potential_key_found = False
existing_transaction_id = None
existing_mongo_id = None
# enforce non-repudiation constraint on create
# if document.has_key("transaction_id"):
# existing_transaction_id = collection.find_one({'transaction_id':document['transaction_id']})
# if existing_transaction_id:
# potential_key_found = True
if "id" in document:
document["_id"] = ObjectId(document["id"])
del document["id"]
if "_id" in document:
existing_mongo_id = collection.find_one({'_id': document['_id']})
if existing_mongo_id:
potential_key_found = True
if update == False and potential_key_found == True:
"""409 conflict"""
response_dict['code'] = 409
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict[
'message'] = "Perhaps you meant to perform an update instead?"
response_dict['errors'] = [
"Conflict. This transaction_id has already been created.", ]
return response_dict
elif update and potential_key_found: # this is an update
# set kwargs _id to the existing_id to force to overwrite existing
# document
# if existing_transaction_id:
#
# document['_id'] = ObjectId(existing_transaction_id['_id'])
# document['history']=True
# history_collection_name = "%s_history" % str(collection_name)
# history_collection = db[str(history_collection_name)]
#
# history_object = existing_transaction_id
# history_object['historical_id'] = existing_transaction_id['_id']
# del history_object['_id']
# #now write the record to the historical collection
# written_object = history_collection.insert(history_object)
if existing_mongo_id:
document['_id'] = ObjectId(existing_mongo_id['_id'])
document['history'] = True
document['verified'] = False
history_collection_name = "%s_history" % str(collection_name)
history_collection = db[str(history_collection_name)]
# print history_collection
# print existing_mongo_id
history_object = existing_mongo_id
history_object['historical_id'] = existing_mongo_id['_id']
del history_object['_id']
# print history_object
# now write the record to the historical collection
written_object = history_collection.insert(history_object)
# update the record
myobjectid = collection.save(document)
else:
# this is new so perform an insert.
myobjectid = collection.insert(document)
# now fetch the record we just wrote so that we write it back to the
# DB.
myobject = collection.find_one({'_id': myobjectid})
response_dict['code'] = 200
response_dict['type'] = "write-results"
myobject['id'] = myobject['_id'].__str__()
del myobject['_id']
l.append(myobject)
response_dict['results'] = l
except:
# print "Error reading from Mongo"
# print str(sys.exc_info())
response_dict['code'] = 400
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict |
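A hedged insert-then-update sketch against a placeholder database and collection; calling it again with update=False on an existing id yields a 409 response:
created = write_mongo({'name': 'example'}, 'exampledb', 'widgets')
if created['code'] == 200:
    doc = created['results'][0]          # contains the stringified 'id'
    doc['name'] = 'example (renamed)'
    updated = write_mongo(doc, 'exampledb', 'widgets', update=True)
    print(updated['code'])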
<SYSTEM_TASK:>
return a result list or an empty list
<END_TASK>
<USER_TASK:>
Description:
def raw_query_mongo_db(kwargs, database_name, collection_name):
# for key in kwargs:
# print "arg: %s: %s" % (key, kwargs[key])
"""return a result list or an empty list""" |
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url,document_class=OrderedDict)
db = mc[database_name]
transactions = db[collection_name]
mysearchresult = transactions.find(kwargs)
mysearchcount = mysearchresult.count()
if mysearchcount > 0:
response_dict['code'] = 200
for d in mysearchresult:
l.append(d)
response_dict['results'] = l
except:
# print "Error reading from Mongo"
# print str(sys.exc_info())
response_dict['code'] = 400
response_dict['type'] = "Error"
response_dict['message'] = str(sys.exc_info())
return response_dict |
<SYSTEM_TASK:>
d is a dict
<END_TASK>
<USER_TASK:>
Description:
def cast_number_strings_to_integers(d):
"""d is a dict""" |
for k, v in d.items():
# print type(v)
if determine_if_str_or_unicode(v):
if v.isdigit():
d[k] = int(v)
return d |
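A minimal illustration; it assumes determine_if_str_or_unicode (from the same module) returns True only for string values:
print(cast_number_strings_to_integers({'a': '42', 'b': 'x7', 'c': 3}))
# -> {'a': 42, 'b': 'x7', 'c': 3}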
<SYSTEM_TASK:>
method returns either RestJob instance or corresponding document, depending on the as_model argument
<END_TASK>
<USER_TASK:>
Description:
def get_details(cls, node, as_model=False):
"""method returns either RestJob instance or corresponding document, depending on the as_model argument """ |
rest_job = RestJob(
process_name=node.process_name,
timeperiod=node.timeperiod,
time_qualifier=node.time_qualifier,
number_of_children=len(node.children),
number_of_failures='NA' if not node.job_record else node.job_record.number_of_failures,
state='NA' if not node.job_record else node.job_record.state,
event_log=[] if not node.job_record else node.job_record.event_log)
if as_model:
return rest_job
else:
return rest_job.document |
<SYSTEM_TASK:>
Generate the system effect based on pedalboard_info
<END_TASK>
<USER_TASK:>
Description:
def discover_system_effect(self, pedalboard_info):
"""
        Generate the system effect based on pedalboard_info
        :param dict pedalboard_info: To obtain this, see
:meth:`~pluginsmanager.util.mod_pedalboard_converter.ModPedalboardConvert.get_pedalboard_info()`
        :return SystemEffect: SystemEffect generated based on pedalboard_info
""" |
# MOD swap ins and outs!!!
hardware = pedalboard_info['hardware']
total_audio_outs = hardware['audio_ins']
total_audio_ins = hardware['audio_outs']
outputs = ['capture_{}'.format(i) for i in range(1, total_audio_outs+1)]
inputs = ['playback_{}'.format(i) for i in range(1, total_audio_ins+1)]
midi_inputs = [
'serial_midi_out' if hardware['serial_midi_out'] else midi_out['symbol']
for midi_out in hardware['midi_outs'] if midi_out['valid']
]
midi_outputs = [
'serial_midi_in' if hardware['serial_midi_in'] else midi_in['symbol']
for midi_in in hardware['midi_ins'] if midi_in['valid']
]
return SystemEffect('system', outputs, inputs, midi_outputs, midi_inputs) |