<SYSTEM_TASK:>
The handler for the join_dags request.
<END_TASK>
<USER_TASK:>
Description:
def _handle_join_dags(self, request):
""" The handler for the join_dags request.
If dag names are given in the payload, only return a valid Response once none of
the named dags are running anymore. If no dag names are given, wait for all dags
except one to finish; by design the remaining dag is the one that issued
the request.
Args:
request (Request): Reference to a request object containing the
incoming request.
Returns:
Response: A response object containing the following fields:
- success: True if all dags the request was waiting for have
completed.
""" |
if request.payload['names'] is None:
send_response = len(self._dags_running) <= 1
else:
send_response = all(name not in self._dags_running
                    for name in request.payload['names'])
if send_response:
return Response(success=True, uid=request.uid)
else:
return None |
<SYSTEM_TASK:>
The handler for the stop_dag request.
<END_TASK>
<USER_TASK:>
Description:
def _handle_stop_dag(self, request):
""" The handler for the stop_dag request.
The stop_dag request adds a dag to the list of dags that should be stopped.
The dag will then stop queueing new tasks and will eventually stop running.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'name': the name of the dag that should be stopped
Returns:
Response: A response object containing the following fields:
- success: True if the dag was added successfully to the list
of dags that should be stopped.
""" |
if (request.payload['name'] is not None) and \
(request.payload['name'] not in self._stop_dags):
self._stop_dags.append(request.payload['name'])
return Response(success=True, uid=request.uid) |
<SYSTEM_TASK:>
The handler for the dag_stopped request.
<END_TASK>
<USER_TASK:>
Description:
def _handle_is_dag_stopped(self, request):
""" The handler for the dag_stopped request.
The dag_stopped request checks whether a dag is flagged to be terminated.
Args:
request (Request): Reference to a request object containing the
incoming request. The payload has to contain the
following fields:
'dag_name': the name of the dag that should be checked
Returns:
Response: A response object containing the following fields:
- is_stopped: True if the dag is flagged to be stopped.
""" |
return Response(success=True,
uid=request.uid,
payload={
'is_stopped': request.payload['dag_name'] in self._stop_dags
}) |
<SYSTEM_TASK:>
This function is called when the worker receives a request to terminate.
<END_TASK>
<USER_TASK:>
Description:
def stop(self, consumer):
""" This function is called when the worker received a request to terminate.
Upon the termination of the worker, the workflows for all running jobs are
stopped gracefully.
Args:
consumer (Consumer): Reference to the consumer object that handles messages
from the broker.
""" |
stopped_workflows = []
for request in list(consumer.controller.state.active_requests):
job = AsyncResult(request.id)
workflow_id = job.result['workflow_id']
if workflow_id not in stopped_workflows:
client = Client(
SignalConnection(**consumer.app.user_options['config'].signal,
auto_connect=True),
request_key=workflow_id)
client.send(Request(action='stop_workflow'))
stopped_workflows.append(workflow_id) |
<SYSTEM_TASK:>
Schedule the execution of a dag by sending a signal to the workflow.
<END_TASK>
<USER_TASK:>
Description:
def start_dag(self, dag, *, data=None):
""" Schedule the execution of a dag by sending a signal to the workflow.
Args:
dag (Dag, str): The dag object or the name of the dag that should be started.
data (MultiTaskData): The data that should be passed on to the new dag.
Returns:
str: The name of the successfully started dag.
""" |
return self._client.send(
Request(
action='start_dag',
payload={'name': dag.name if isinstance(dag, Dag) else dag,
'data': data if isinstance(data, MultiTaskData) else None}
)
).payload['dag_name'] |
<SYSTEM_TASK:>
Wait for the specified dags to terminate.
<END_TASK>
<USER_TASK:>
Description:
def join_dags(self, names=None):
""" Wait for the specified dags to terminate.
This function blocks until the specified dags terminate. If no dags are specified,
wait for all dags of the workflow, except the dag of the task calling this signal,
to terminate.
Args:
names (list): The names of the dags that have to terminate.
Returns:
bool: True if the signal was sent successfully.
""" |
return self._client.send(
Request(
action='join_dags',
payload={'names': names}
)
).success |
<SYSTEM_TASK:>
Send a stop signal to the specified dag or the dag that hosts this task.
<END_TASK>
<USER_TASK:>
Description:
def stop_dag(self, name=None):
""" Send a stop signal to the specified dag or the dag that hosts this task.
Args:
name str: The name of the dag that should be stopped. If no name is given the
dag that hosts this task is stopped.
Upon receiving the stop signal, the dag will not queue any new tasks and wait
for running tasks to terminate.
Returns:
bool: True if the signal was sent successfully.
""" |
return self._client.send(
Request(
action='stop_dag',
payload={'name': name if name is not None else self._dag_name}
)
).success |
<SYSTEM_TASK:>
Check whether the task received a stop signal from the workflow.
<END_TASK>
<USER_TASK:>
Description:
def is_stopped(self):
""" Check whether the task received a stop signal from the workflow.
Tasks can use the stop flag to gracefully terminate their work. This is
particularly important for long running tasks and tasks that employ an
infinite loop, such as trigger tasks.
Returns:
bool: True if the task should be stopped.
""" |
resp = self._client.send(
Request(
action='is_dag_stopped',
payload={'dag_name': self._dag_name}
)
)
return resp.payload['is_stopped'] |
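The signal methods above are typically combined inside a task body. The sketch below is a minimal, hypothetical example; the (data, store, signal, context) callable signature and the dag name 'child_dag' are assumptions made for illustration only.

def coordinating_task(data, store, signal, context):
    # schedule another dag and hand the current data over to it (hypothetical dag name)
    started = signal.start_dag('child_dag', data=data)
    # block until that dag has finished
    signal.join_dags([started])
    # finally stop the dag that hosts this task
    signal.stop_dag()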
<SYSTEM_TASK:>
Generator function that returns celery events.
<END_TASK>
<USER_TASK:>
Description:
def event_stream(app, *, filter_by_prefix=None):
""" Generator function that returns celery events.
This function turns the callback based celery event handling into a generator.
Args:
app: Reference to a celery application object.
filter_by_prefix (str): If not None, only allow events that have a type that
starts with this prefix to yield a generator event.
Returns:
generator: A generator that returns celery events.
""" |
q = Queue()
def handle_event(event):
if filter_by_prefix is None or event['type'].startswith(filter_by_prefix):
q.put(event)
def receive_events():
with app.connection() as connection:
recv = app.events.Receiver(connection, handlers={
'*': handle_event
})
recv.capture(limit=None, timeout=None, wakeup=True)
t = threading.Thread(target=receive_events)
t.start()
while True:
yield q.get(block=True) |
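The callback-to-generator pattern used above can be sketched without celery: a background thread feeds a queue and the generator yields items as they arrive. This is a standalone illustration of the mechanism, not part of the library.

import threading
from queue import Queue

def make_stream(produce):
    """ Turn a callback-based producer into a blocking generator. """
    q = Queue()
    # the producer runs in its own thread and emits items via q.put
    threading.Thread(target=produce, args=(q.put,), daemon=True).start()
    while True:
        yield q.get(block=True)

# consume ten items produced by the background thread
stream = make_stream(lambda emit: [emit(i) for i in range(10)])
print([next(stream) for _ in range(10)])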
<SYSTEM_TASK:>
Factory function that turns a celery event into an event object.
<END_TASK>
<USER_TASK:>
Description:
def create_event_model(event):
""" Factory function that turns a celery event into an event object.
Args:
event (dict): A dictionary that represents a celery event.
Returns:
object: An event object representing the received event.
Raises:
JobEventTypeUnsupported: If an unsupported celery job event was received.
WorkerEventTypeUnsupported: If an unsupported celery worker event was received.
EventTypeUnknown: If an unknown event type (neither job nor worker) was received.
""" |
if event['type'].startswith('task'):
factory = {
JobEventName.Started: JobStartedEvent,
JobEventName.Succeeded: JobSucceededEvent,
JobEventName.Stopped: JobStoppedEvent,
JobEventName.Aborted: JobAbortedEvent
}
if event['type'] in factory:
return factory[event['type']].from_event(event)
else:
raise JobEventTypeUnsupported(
'Unsupported event type {}'.format(event['type']))
elif event['type'].startswith('worker'):
raise WorkerEventTypeUnsupported(
'Unsupported event type {}'.format(event['type']))
else:
raise EventTypeUnknown('Unknown event type {}'.format(event['type'])) |
<SYSTEM_TASK:>
Decorator that checks whether a configuration file was set.
<END_TASK>
<USER_TASK:>
Description:
def config_required(f):
""" Decorator that checks whether a configuration file was set. """ |
def new_func(obj, *args, **kwargs):
if 'config' not in obj:
click.echo(_style(obj.get('show_color', False),
'Could not find a valid configuration file!',
fg='red', bold=True))
raise click.Abort()
else:
return f(obj, *args, **kwargs)
return update_wrapper(new_func, f) |
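A hypothetical example of wiring the decorator onto a click command; the command name and the pass_obj usage are assumptions, not taken from the original source.

import json
import click

@click.command()
@click.pass_obj
@config_required
def show_config(obj):
    # only reached when a configuration was loaded into obj['config']
    click.echo(json.dumps(obj['config'].to_dict(), indent=4))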
<SYSTEM_TASK:>
Ingest the configuration object into the click context.
<END_TASK>
<USER_TASK:>
Description:
def ingest_config_obj(ctx, *, silent=True):
""" Ingest the configuration object into the click context. """ |
try:
ctx.obj['config'] = Config.from_file(ctx.obj['config_path'])
except ConfigLoadError as err:
click.echo(_style(ctx.obj['show_color'], str(err), fg='red', bold=True))
if not silent:
raise click.Abort() |
<SYSTEM_TASK:>
Command line client for lightflow. A lightweight, high performance pipeline
<END_TASK>
<USER_TASK:>
Description:
def cli(ctx, config, no_color):
""" Command line client for lightflow. A lightweight, high performance pipeline
system for synchrotrons.
Lightflow is being developed at the Australian Synchrotron.
""" |
ctx.obj = {
'show_color': not no_color if no_color is not None else True,
'config_path': config
} |
<SYSTEM_TASK:>
Create a default configuration file.
<END_TASK>
<USER_TASK:>
Description:
def config_default(dest):
""" Create a default configuration file.
\b
DEST: Path or file name for the configuration file.
""" |
conf_path = Path(dest).resolve()
if conf_path.is_dir():
conf_path = conf_path / LIGHTFLOW_CONFIG_NAME
conf_path.write_text(Config.default())
click.echo('Configuration written to {}'.format(conf_path)) |
<SYSTEM_TASK:>
List the current configuration.
<END_TASK>
<USER_TASK:>
Description:
def config_list(ctx):
""" List the current configuration. """ |
ingest_config_obj(ctx, silent=False)
click.echo(json.dumps(ctx.obj['config'].to_dict(), indent=4)) |
<SYSTEM_TASK:>
Copy the example workflows to a directory.
<END_TASK>
<USER_TASK:>
Description:
def config_examples(dest, user_dir):
""" Copy the example workflows to a directory.
\b
DEST: Path to which the examples should be copied.
""" |
examples_path = Path(lightflow.__file__).parents[1] / 'examples'
if examples_path.exists():
dest_path = Path(dest).resolve()
if not user_dir:
dest_path = dest_path / 'examples'
if dest_path.exists():
if not click.confirm('Directory already exists. Overwrite existing files?',
default=True, abort=True):
return
else:
dest_path.mkdir()
for example_file in examples_path.glob('*.py'):
shutil.copy(str(example_file), str(dest_path / example_file.name))
click.echo('Copied examples to {}'.format(str(dest_path)))
else:
click.echo('The examples source path does not exist') |
<SYSTEM_TASK:>
Send a workflow to the queue.
<END_TASK>
<USER_TASK:>
Description:
def workflow_start(obj, queue, keep_data, name, workflow_args):
""" Send a workflow to the queue.
\b
NAME: The name of the workflow that should be started.
WORKFLOW_ARGS: Workflow arguments in the form key1=value1 key2=value2.
""" |
try:
start_workflow(name=name,
config=obj['config'],
queue=queue,
clear_data_store=not keep_data,
store_args=dict([arg.split('=', maxsplit=1)
for arg in workflow_args]))
except (WorkflowArgumentError, WorkflowImportError) as e:
click.echo(_style(obj['show_color'],
'An error occurred when trying to start the workflow',
fg='red', bold=True))
click.echo('{}'.format(e))
except WorkflowDefinitionError as e:
click.echo(_style(obj['show_color'],
'The graph {} in workflow {} is not a directed acyclic graph'.
format(e.graph_name, e.workflow_name), fg='red', bold=True)) |
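The key1=value1 parsing used for WORKFLOW_ARGS can be shown in isolation; splitting with maxsplit=1 keeps any further '=' characters inside the value. The example arguments are made up.

workflow_args = ['threshold=0.5', 'label=run=final']
store_args = dict(arg.split('=', maxsplit=1) for arg in workflow_args)
print(store_args)   # {'threshold': '0.5', 'label': 'run=final'}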
<SYSTEM_TASK:>
Stop one or more running workflows.
<END_TASK>
<USER_TASK:>
Description:
def workflow_stop(obj, names):
""" Stop one or more running workflows.
\b
NAMES: The names, ids or job ids of the workflows that should be stopped.
Leave empty to stop all running workflows.
""" |
if len(names) == 0:
msg = 'Would you like to stop all workflows?'
else:
msg = '\n{}\n\n{}'.format('\n'.join(names),
'Would you like to stop these jobs?')
if click.confirm(msg, default=True, abort=True):
stop_workflow(obj['config'], names=names if len(names) > 0 else None) |
<SYSTEM_TASK:>
Show the status of the workflows.
<END_TASK>
<USER_TASK:>
Description:
def workflow_status(obj, details):
""" Show the status of the workflows. """ |
show_colors = obj['show_color']
config_cli = obj['config'].cli
if details:
temp_form = '{:>{}} {:20} {:25} {:25} {:38} {}'
else:
temp_form = '{:>{}} {:20} {:25} {} {} {}'
click.echo('\n')
click.echo(temp_form.format(
'Status',
12,
'Name',
'Start Time',
'ID' if details else '',
'Job' if details else '',
'Arguments'
))
click.echo('-' * (138 if details else 75))
def print_jobs(jobs, *, label='', color='green'):
for job in jobs:
if job.start_time is not None:
    start_time = job.start_time.replace(tzinfo=pytz.utc).astimezone().strftime(
        config_cli['time_format'])
else:
    start_time = 'unknown'
click.echo(temp_form.format(
    _style(show_colors, label, fg=color, bold=True),
    25 if show_colors else 12,
    job.name,
    start_time,
job.workflow_id if details else '',
job.id if details else '',
','.join(['{}={}'.format(k, v) for k, v in job.arguments.items()]))
)
# running jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Active,
filter_by_type=JobType.Workflow),
label='Running', color='green')
# scheduled jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Scheduled,
filter_by_type=JobType.Workflow),
label='Scheduled', color='blue')
# registered jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Registered,
filter_by_type=JobType.Workflow),
label='Registered', color='yellow')
# reserved jobs
print_jobs(list_jobs(config=obj['config'],
status=JobStatus.Reserved,
filter_by_type=JobType.Workflow),
label='Reserved', color='yellow') |
<SYSTEM_TASK:>
Stop running workers.
<END_TASK>
<USER_TASK:>
Description:
def worker_stop(obj, worker_ids):
""" Stop running workers.
\b
WORKER_IDS: The IDs of the workers that should be stopped, or none to stop them all.
""" |
if len(worker_ids) == 0:
msg = 'Would you like to stop all workers?'
else:
msg = '\n{}\n\n{}'.format('\n'.join(worker_ids),
'Would you like to stop these workers?')
if click.confirm(msg, default=True, abort=True):
stop_worker(obj['config'],
worker_ids=list(worker_ids) if len(worker_ids) > 0 else None) |
<SYSTEM_TASK:>
Show the status of all running workers.
<END_TASK>
<USER_TASK:>
Description:
def worker_status(obj, filter_queues, details):
""" Show the status of all running workers. """ |
show_colors = obj['show_color']
f_queues = filter_queues.split(',') if filter_queues is not None else None
workers = list_workers(config=obj['config'], filter_by_queues=f_queues)
if len(workers) == 0:
click.echo('No workers are running at the moment.')
return
for ws in workers:
click.echo('{} {}'.format(_style(show_colors, 'Worker:', fg='blue', bold=True),
_style(show_colors, ws.name, fg='blue')))
click.echo('{:23} {}'.format(_style(show_colors, '> pid:', bold=True), ws.pid))
if details:
click.echo('{:23} {}'.format(_style(show_colors, '> concurrency:', bold=True),
ws.concurrency))
click.echo('{:23} {}'.format(_style(show_colors, '> processes:', bold=True),
', '.join(str(p) for p in ws.process_pids)))
click.echo('{:23} {}://{}:{}/{}'.format(_style(show_colors, '> broker:',
bold=True),
ws.broker.transport,
ws.broker.hostname,
ws.broker.port,
ws.broker.virtual_host))
click.echo('{:23} {}'.format(_style(show_colors, '> queues:', bold=True),
', '.join([q.name for q in ws.queues])))
if details:
click.echo('{:23} {}'.format(_style(show_colors, '> job count:', bold=True),
ws.job_count))
jobs = list_jobs(config=obj['config'], filter_by_worker=ws.name)
click.echo('{:23} [{}]'.format(_style(show_colors, '> jobs:', bold=True),
len(jobs) if len(jobs) > 0 else 'No tasks'))
for job in jobs:
click.echo('{:15} {} {}'.format(
'',
_style(show_colors, '{}'.format(job.name),
bold=True, fg=JOB_COLOR[job.type]),
_style(show_colors, '({}) [{}] <{}> on {}'.format(
job.type, job.workflow_id, job.id, job.worker_pid),
fg=JOB_COLOR[job.type])))
click.echo('\n') |
<SYSTEM_TASK:>
Show the worker and workflow event stream.
<END_TASK>
<USER_TASK:>
Description:
def monitor(ctx, details):
""" Show the worker and workflow event stream. """ |
ingest_config_obj(ctx, silent=False)
show_colors = ctx.obj['show_color']
event_display = {
JobEventName.Started: {'color': 'blue', 'label': 'started'},
JobEventName.Succeeded: {'color': 'green', 'label': 'succeeded'},
JobEventName.Stopped: {'color': 'yellow', 'label': 'stopped'},
JobEventName.Aborted: {'color': 'red', 'label': 'aborted'}
}
click.echo('\n')
click.echo('{:>10} {:>12} {:25} {:18} {:16} {:28} {}'.format(
'Status',
'Type',
'Name',
'Duration (sec)',
'Queue' if details else '',
'Workflow ID' if details else '',
'Worker' if details else ''))
click.echo('-' * (136 if details else 65))
for event in workflow_events(ctx.obj['config']):
evt_disp = event_display[event.event]
click.echo('{:>{}} {:>{}} {:25} {:18} {:16} {:28} {}'.format(
_style(show_colors, evt_disp['label'], fg=evt_disp['color']),
20 if show_colors else 10,
_style(show_colors, event.type, bold=True, fg=JOB_COLOR[event.type]),
24 if show_colors else 12,
event.name,
'{0:.3f}'.format(event.duration) if event.duration is not None else '',
event.queue if details else '',
event.workflow_id if details else '',
event.hostname if details else '')) |
<SYSTEM_TASK:>
Run an extension by its name.
<END_TASK>
<USER_TASK:>
Description:
def ext(obj, ext_name, ext_args):
""" Run an extension by its name.
\b
EXT_NAME: The name of the extension.
EXT_ARGS: Arguments that are passed to the extension.
""" |
try:
mod = import_module('lightflow_{}.__main__'.format(ext_name))
mod.main(ext_args)
except ImportError as err:
click.echo(_style(obj['show_color'],
'An error occurred when trying to call the extension',
fg='red', bold=True))
click.echo('{}'.format(err)) |
<SYSTEM_TASK:>
Create a fully configured Celery application object.
<END_TASK>
<USER_TASK:>
Description:
def create_app(config):
""" Create a fully configured Celery application object.
Args:
config (Config): A reference to a lightflow configuration object.
Returns:
Celery: A fully configured Celery application object.
""" |
# configure the celery logging system with the lightflow settings
setup_logging.connect(partial(_initialize_logging, config), weak=False)
task_postrun.connect(partial(_cleanup_workflow, config), weak=False)
# patch Celery to use cloudpickle instead of pickle for serialisation
patch_celery()
# create the main celery app and load the configuration
app = Celery('lightflow')
app.conf.update(**config.celery)
# overwrite user supplied settings to make sure celery works with lightflow
app.conf.update(
task_serializer='pickle',
accept_content=['pickle'],
result_serializer='pickle',
task_default_queue=DefaultJobQueueName.Task
)
if isinstance(app.conf.include, list):
app.conf.include.extend(LIGHTFLOW_INCLUDE)
else:
if len(app.conf.include) > 0:
raise ConfigOverwriteError(
'The content in the include config will be overwritten')
app.conf.include = LIGHTFLOW_INCLUDE
return app |
<SYSTEM_TASK:>
Cleanup the results of a workflow when it finished.
<END_TASK>
<USER_TASK:>
Description:
def _cleanup_workflow(config, task_id, args, **kwargs):
""" Cleanup the results of a workflow when it finished.
Connects to the postrun signal of Celery. If the signal was sent by a workflow,
remove the result from the result backend.
Args:
config (Config): Reference to a lightflow configuration object.
task_id (str): The id of the task.
args (tuple): The arguments the task was started with.
**kwargs: Keyword arguments from the hook.
""" |
from lightflow.models import Workflow
if isinstance(args[0], Workflow):
if config.celery['result_expires'] == 0:
AsyncResult(task_id).forget() |
<SYSTEM_TASK:>
Celery task that runs a single dag on a worker.
<END_TASK>
<USER_TASK:>
Description:
def execute_dag(self, dag, workflow_id, data=None):
""" Celery task that runs a single dag on a worker.
This celery task starts, manages and monitors the individual tasks of a dag.
Args:
self (Task): Reference to itself, the celery task object.
dag (Dag): Reference to a Dag object that is being used to start, manage and
monitor tasks.
workflow_id (string): The unique ID of the workflow run that started this dag.
data (MultiTaskData): An optional MultiTaskData object that is being passed to
the first tasks in the dag. This allows the transfer of
data from dag to dag.
""" |
start_time = datetime.utcnow()
logger.info('Running DAG <{}>'.format(dag.name))
store_doc = DataStore(**self.app.user_options['config'].data_store,
auto_connect=True).get(workflow_id)
store_loc = 'log.{}'.format(dag.name)
# update data store with provenance information
store_doc.set(key='{}.start_time'.format(store_loc), value=start_time,
section=DataStoreDocumentSection.Meta)
# send custom celery event that the dag has been started
self.send_event(JobEventName.Started,
job_type=JobType.Dag,
name=dag.name,
queue=dag.queue,
time=start_time,
workflow_id=workflow_id,
duration=None)
# store job specific meta information with the job
self.update_state(meta={'name': dag.name,
'queue': dag.queue,
'type': JobType.Dag,
'workflow_id': workflow_id})
# run the tasks in the DAG
signal = DagSignal(Client(SignalConnection(**self.app.user_options['config'].signal,
auto_connect=True),
request_key=workflow_id), dag.name)
dag.run(config=self.app.user_options['config'],
workflow_id=workflow_id,
signal=signal,
data=data)
end_time = datetime.utcnow()
duration = (end_time - start_time).total_seconds()
# update data store with provenance information
store_doc.set(key='{}.end_time'.format(store_loc), value=end_time,
section=DataStoreDocumentSection.Meta)
store_doc.set(key='{}.duration'.format(store_loc), value=duration,
section=DataStoreDocumentSection.Meta)
# send custom celery event that the dag has succeeded or was aborted
event_name = JobEventName.Succeeded if not signal.is_stopped else JobEventName.Aborted
self.send_event(event_name,
job_type=JobType.Dag,
name=dag.name,
queue=dag.queue,
time=end_time,
workflow_id=workflow_id,
duration=duration)
logger.info('Finished DAG <{}>'.format(dag.name)) |
<SYSTEM_TASK:>
Create a BrokerStats object from the dictionary returned by celery.
<END_TASK>
<USER_TASK:>
Description:
def from_celery(cls, broker_dict):
""" Create a BrokerStats object from the dictionary returned by celery.
Args:
broker_dict (dict): The dictionary as returned by celery.
Returns:
BrokerStats: A fully initialized BrokerStats object.
""" |
return BrokerStats(
hostname=broker_dict['hostname'],
port=broker_dict['port'],
transport=broker_dict['transport'],
virtual_host=broker_dict['virtual_host']
) |
<SYSTEM_TASK:>
Return a dictionary of the broker stats.
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
""" Return a dictionary of the broker stats.
Returns:
dict: Dictionary of the stats.
""" |
return {
'hostname': self.hostname,
'port': self.port,
'transport': self.transport,
'virtual_host': self.virtual_host
} |
<SYSTEM_TASK:>
Create a WorkerStats object from the dictionary returned by celery.
<END_TASK>
<USER_TASK:>
Description:
def from_celery(cls, name, worker_dict, queues):
""" Create a WorkerStats object from the dictionary returned by celery.
Args:
name (str): The name of the worker.
worker_dict (dict): The dictionary as returned by celery.
queues (list): A list of QueueStats objects that represent the queues this
worker is listening on.
Returns:
WorkerStats: A fully initialized WorkerStats object.
""" |
return WorkerStats(
name=name,
broker=BrokerStats.from_celery(worker_dict['broker']),
pid=worker_dict['pid'],
process_pids=worker_dict['pool']['processes'],
concurrency=worker_dict['pool']['max-concurrency'],
job_count=worker_dict['pool']['writes']['total'],
queues=queues
) |
<SYSTEM_TASK:>
Return a dictionary of the worker stats.
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
""" Return a dictionary of the worker stats.
Returns:
dict: Dictionary of the stats.
""" |
return {
'name': self.name,
'broker': self.broker.to_dict(),
'pid': self.pid,
'process_pids': self.process_pids,
'concurrency': self.concurrency,
'job_count': self.job_count,
'queues': [q.to_dict() for q in self.queues]
} |
<SYSTEM_TASK:>
Create a JobStats object from the dictionary returned by celery.
<END_TASK>
<USER_TASK:>
Description:
def from_celery(cls, worker_name, job_dict, celery_app):
""" Create a JobStats object from the dictionary returned by celery.
Args:
worker_name (str): The name of the worker this jobs runs on.
job_dict (dict): The dictionary as returned by celery.
celery_app: Reference to a celery application object.
Returns:
JobStats: A fully initialized JobStats object.
""" |
if not isinstance(job_dict, dict) or 'id' not in job_dict:
raise JobStatInvalid('The job description is missing important fields.')
async_result = AsyncResult(id=job_dict['id'], app=celery_app)
a_info = async_result.info if isinstance(async_result.info, dict) else None
return JobStats(
name=a_info.get('name', '') if a_info is not None else '',
job_id=job_dict['id'],
job_type=a_info.get('type', '') if a_info is not None else '',
workflow_id=a_info.get('workflow_id', '') if a_info is not None else '',
queue=a_info.get('queue', '') if a_info is not None else '',
start_time=a_info.get('start_time', None) if a_info is not None else None,
arguments=a_info.get('arguments', {}) if a_info is not None else {},
acknowledged=job_dict['acknowledged'],
func_name=job_dict['type'],
hostname=job_dict['hostname'],
worker_name=worker_name,
worker_pid=job_dict['worker_pid'],
routing_key=job_dict['delivery_info']['routing_key']
) |
<SYSTEM_TASK:>
Return a dictionary of the job stats.
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
""" Return a dictionary of the job stats.
Returns:
dict: Dictionary of the stats.
""" |
return {
'name': self.name,
'id': self.id,
'type': self.type,
'workflow_id': self.workflow_id,
'queue': self.queue,
'start_time': self.start_time,
'arguments': self.arguments,
'acknowledged': self.acknowledged,
'func_name': self.func_name,
'hostname': self.hostname,
'worker_name': self.worker_name,
'worker_pid': self.worker_pid,
'routing_key': self.routing_key
} |
<SYSTEM_TASK:>
Create a JobEvent object from the event dictionary returned by celery.
<END_TASK>
<USER_TASK:>
Description:
def from_event(cls, event):
""" Create a JobEvent object from the event dictionary returned by celery.
Args:
event (dict): The dictionary as returned by celery.
Returns:
JobEvent: A fully initialized JobEvent object.
""" |
return cls(
uuid=event['uuid'],
job_type=event['job_type'],
event_type=event['type'],
queue=event['queue'],
hostname=event['hostname'],
pid=event['pid'],
name=event['name'],
workflow_id=event['workflow_id'],
event_time=event['time'],
duration=event['duration']
) |
<SYSTEM_TASK:>
Start a single workflow by sending it to the workflow queue.
<END_TASK>
<USER_TASK:>
Description:
def start_workflow(name, config, *, queue=DefaultJobQueueName.Workflow,
clear_data_store=True, store_args=None):
""" Start a single workflow by sending it to the workflow queue.
Args:
name (str): The name of the workflow that should be started. Refers to the
name of the workflow file without the .py extension.
config (Config): Reference to the configuration object from which the
settings for the workflow are retrieved.
queue (str): Name of the queue the workflow should be scheduled to.
clear_data_store (bool): Remove any documents created during the workflow
run in the data store after the run.
store_args (dict): Dictionary of additional arguments that are ingested into the
data store prior to the execution of the workflow.
Returns:
str: The ID of the workflow job.
Raises:
WorkflowArgumentError: If the workflow requires arguments to be set in store_args
that were not supplied to the workflow.
WorkflowImportError: If the import of the workflow fails.
WorkflowDefinitionError: If one of the graphs in the workflow is not a valid
directed acyclic graph.
""" |
try:
wf = Workflow.from_name(name,
queue=queue,
clear_data_store=clear_data_store,
arguments=store_args)
except DirectedAcyclicGraphInvalid as e:
raise WorkflowDefinitionError(workflow_name=name,
graph_name=e.graph_name)
celery_app = create_app(config)
result = celery_app.send_task(JobExecPath.Workflow,
args=(wf,), queue=queue, routing_key=queue)
return result.id |
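A minimal usage sketch, assuming a configuration file at 'lightflow.cfg' and a workflow file example_workflow.py on the configured workflow path; both names are placeholders.

config = Config.from_file('lightflow.cfg')            # placeholder path
job_id = start_workflow('example_workflow', config,
                        store_args={'threshold': '0.5'})
print('queued workflow job', job_id)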
<SYSTEM_TASK:>
Stop one or more workflows.
<END_TASK>
<USER_TASK:>
Description:
def stop_workflow(config, *, names=None):
""" Stop one or more workflows.
Args:
config (Config): Reference to the configuration object from which the
settings for the workflow are retrieved.
names (list): List of workflow names, workflow ids or workflow job ids for the
workflows that should be stopped. If all workflows should be
stopped, set it to None.
Returns:
tuple: A tuple of the workflow jobs that were successfully stopped and the ones
that could not be stopped.
""" |
jobs = list_jobs(config, filter_by_type=JobType.Workflow)
if names is not None:
filtered_jobs = []
for job in jobs:
if (job.id in names) or (job.name in names) or (job.workflow_id in names):
filtered_jobs.append(job)
else:
filtered_jobs = jobs
success = []
failed = []
for job in filtered_jobs:
client = Client(SignalConnection(**config.signal, auto_connect=True),
request_key=job.workflow_id)
if client.send(Request(action='stop_workflow')).success:
success.append(job)
else:
failed.append(job)
return success, failed |
<SYSTEM_TASK:>
Return a list of Celery jobs.
<END_TASK>
<USER_TASK:>
Description:
def list_jobs(config, *, status=JobStatus.Active,
filter_by_type=None, filter_by_worker=None):
""" Return a list of Celery jobs.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
status (JobStatus): The status of the jobs that should be returned.
filter_by_type (list): Restrict the returned jobs to the types in this list.
filter_by_worker (list): Only return jobs that were registered, reserved or are
running on the workers given in this list of worker names. Using
this option will increase the performance.
Returns:
list: A list of JobStats.
""" |
celery_app = create_app(config)
# option to filter by the worker (improves performance)
if filter_by_worker is not None:
inspect = celery_app.control.inspect(
destination=filter_by_worker if isinstance(filter_by_worker, list)
else [filter_by_worker])
else:
inspect = celery_app.control.inspect()
# get active, registered or reserved jobs
if status == JobStatus.Active:
job_map = inspect.active()
elif status == JobStatus.Registered:
job_map = inspect.registered()
elif status == JobStatus.Reserved:
job_map = inspect.reserved()
elif status == JobStatus.Scheduled:
job_map = inspect.scheduled()
else:
job_map = None
if job_map is None:
return []
result = []
for worker_name, jobs in job_map.items():
for job in jobs:
try:
job_stats = JobStats.from_celery(worker_name, job, celery_app)
if (filter_by_type is None) or (job_stats.type == filter_by_type):
result.append(job_stats)
except JobStatInvalid:
pass
return result |
<SYSTEM_TASK:>
Return a generator that yields workflow events.
<END_TASK>
<USER_TASK:>
Description:
def events(config):
""" Return a generator that yields workflow events.
For every workflow event that is sent from celery this generator yields an event
object.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
Returns:
generator: A generator that returns workflow events.
""" |
celery_app = create_app(config)
for event in event_stream(celery_app, filter_by_prefix='task'):
try:
yield create_event_model(event)
except JobEventTypeUnsupported:
pass |
<SYSTEM_TASK:>
Drain the process output streams.
<END_TASK>
<USER_TASK:>
Description:
def run(self):
""" Drain the process output streams. """ |
read_stdout = partial(self._read_output, stream=self._process.stdout,
callback=self._callback_stdout,
output_file=self._stdout_file)
read_stderr = partial(self._read_output, stream=self._process.stderr,
callback=self._callback_stderr,
output_file=self._stderr_file)
# capture the process output as long as the process is active
try:
while self._process.poll() is None:
result_stdout = read_stdout()
result_stderr = read_stderr()
if not result_stdout and not result_stderr:
sleep(self._refresh_time)
# read remaining lines
while read_stdout():
pass
while read_stderr():
pass
except (StopTask, AbortWorkflow) as exc:
self._exc_obj = exc |
<SYSTEM_TASK:>
Read the output of the process, execute the callback and save the output.
<END_TASK>
<USER_TASK:>
Description:
def _read_output(self, stream, callback, output_file):
""" Read the output of the process, executed the callback and save the output.
Args:
stream: A file object pointing to the output stream that should be read.
callback(callable, None): A callback function that is called for each new
line of output.
output_file: A file object to which the full output is written.
Returns:
bool: True if a line was read from the output, otherwise False.
""" |
if (callback is None and output_file is None) or stream.closed:
return False
line = stream.readline()
if line:
if callback is not None:
callback(line.decode(),
self._data, self._store, self._signal, self._context)
if output_file is not None:
output_file.write(line)
return True
else:
return False |
<SYSTEM_TASK:>
Function wrapper that sets the user and group for the process
<END_TASK>
<USER_TASK:>
Description:
def _run_as(user, group):
""" Function wrapper that sets the user and group for the process """ |
def wrapper():
if user is not None:
os.setuid(user)
if group is not None:
os.setgid(group)
return wrapper |
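A wrapper like this is commonly handed to subprocess.Popen as preexec_fn so the child process switches to the given user and group before executing the command. The sketch assumes the calling process has sufficient privileges; the numeric ids are placeholders.

import subprocess

# run 'id' as uid/gid 1000 (placeholder values)
proc = subprocess.Popen(['id'], preexec_fn=_run_as(1000, 1000))
proc.wait()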
<SYSTEM_TASK:>
Convert the specified value to the type of the option.
<END_TASK>
<USER_TASK:>
Description:
def convert(self, value):
""" Convert the specified value to the type of the option.
Args:
value: The value that should be converted.
Returns:
The value with the type given by the option.
""" |
if self._type is str:
return str(value)
elif self._type is int:
try:
return int(value)
except (UnicodeError, ValueError):
raise WorkflowArgumentError('Cannot convert {} to int'.format(value))
elif self._type is float:
try:
return float(value)
except (UnicodeError, ValueError):
raise WorkflowArgumentError('Cannot convert {} to float'.format(value))
elif self._type is bool:
if isinstance(value, bool):
return bool(value)
value = value.lower()
if value in ('true', '1', 'yes', 'y'):
return True
elif value in ('false', '0', 'no', 'n'):
return False
raise WorkflowArgumentError('Cannot convert {} to bool'.format(value))
else:
return value |
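The boolean branch above accepts a small set of string spellings. A standalone sketch of that rule, for illustration only:

def to_bool(value):
    """ Mirror of the bool conversion rule used in convert(). """
    if isinstance(value, bool):
        return value
    value = value.lower()
    if value in ('true', '1', 'yes', 'y'):
        return True
    if value in ('false', '0', 'no', 'n'):
        return False
    raise ValueError('Cannot convert {} to bool'.format(value))

print(to_bool('Yes'), to_bool('0'))   # True False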
<SYSTEM_TASK:>
Returns the names of all options that are required but were not specified.
<END_TASK>
<USER_TASK:>
Description:
def check_missing(self, args):
""" Returns the names of all options that are required but were not specified.
All options that don't have a default value are required in order to run the
workflow.
Args:
args (dict): A dictionary of the provided arguments that is checked for
missing options.
Returns:
list: A list with the names of the options that are missing from the
provided arguments.
""" |
return [opt.name for opt in self
if (opt.name not in args) and (opt.default is None)] |
<SYSTEM_TASK:>
Consolidate the provided arguments.
<END_TASK>
<USER_TASK:>
Description:
def consolidate(self, args):
""" Consolidate the provided arguments.
If the provided arguments have matching options, this performs a type conversion.
For any option that has a default value and is not present in the provided
arguments, the default value is added.
Args:
args (dict): A dictionary of the provided arguments.
Returns:
dict: A dictionary with the provided arguments converted to their option types
and enriched with the defaults of any missing options.
""" |
result = dict(args)
for opt in self:
if opt.name in result:
result[opt.name] = opt.convert(result[opt.name])
else:
if opt.default is not None:
result[opt.name] = opt.convert(opt.default)
return result |
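The consolidation behaviour can be illustrated with plain dictionaries: provided values are converted to the option type, while missing options fall back to their converted defaults. The option definitions below are made up.

options = {'iterations': (int, 10), 'label': (str, 'run')}   # name -> (type, default)
provided = {'iterations': '3'}
result = dict(provided)
for name, (opt_type, default) in options.items():
    result[name] = opt_type(result[name]) if name in result else opt_type(default)
print(result)   # {'iterations': 3, 'label': 'run'}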
<SYSTEM_TASK:>
Validate the graph by checking whether it is a directed acyclic graph.
<END_TASK>
<USER_TASK:>
Description:
def validate(self, graph):
""" Validate the graph by checking whether it is a directed acyclic graph.
Args:
graph (DiGraph): Reference to a DiGraph object from NetworkX.
Raises:
DirectedAcyclicGraphInvalid: If the graph is not a valid dag.
""" |
if not nx.is_directed_acyclic_graph(graph):
raise DirectedAcyclicGraphInvalid(graph_name=self._name) |
<SYSTEM_TASK:>
Merge the specified dataset on top of the existing data.
<END_TASK>
<USER_TASK:>
Description:
def merge(self, dataset):
""" Merge the specified dataset on top of the existing data.
This replaces all values in the existing dataset with the values from the
given dataset.
Args:
dataset (TaskData): A reference to the TaskData object that should be merged
on top of the existing object.
""" |
def merge_data(source, dest):
for key, value in source.items():
if isinstance(value, dict):
merge_data(value, dest.setdefault(key, {}))
else:
dest[key] = value
return dest
merge_data(dataset.data, self._data)
for h in dataset.task_history:
if h not in self._task_history:
self._task_history.append(h) |
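The recursive merge keeps keys that the incoming dataset does not touch and merges nested dictionaries key by key. A standalone illustration with plain dicts:

def merge_data(source, dest):
    # recursively copy source into dest, overwriting scalar values
    for key, value in source.items():
        if isinstance(value, dict):
            merge_data(value, dest.setdefault(key, {}))
        else:
            dest[key] = value
    return dest

existing = {'scan': {'exposure': 1.0, 'frames': 10}, 'sample': 'A'}
update = {'scan': {'frames': 20}, 'operator': 'beamline'}
print(merge_data(update, existing))
# {'scan': {'exposure': 1.0, 'frames': 20}, 'sample': 'A', 'operator': 'beamline'}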
<SYSTEM_TASK:>
Add a new dataset to the MultiTaskData.
<END_TASK>
<USER_TASK:>
Description:
def add_dataset(self, task_name, dataset=None, *, aliases=None):
""" Add a new dataset to the MultiTaskData.
Args:
task_name (str): The name of the task from which the dataset was received.
dataset (TaskData): The dataset that should be added.
aliases (list): A list of aliases that should be registered with the dataset.
""" |
self._datasets.append(dataset if dataset is not None else TaskData())
last_index = len(self._datasets) - 1
self._aliases[task_name] = last_index
if aliases is not None:
for alias in aliases:
self._aliases[alias] = last_index
if len(self._datasets) == 1:
self._default_index = 0 |
<SYSTEM_TASK:>
Add an alias pointing to the specified index.
<END_TASK>
<USER_TASK:>
Description:
def add_alias(self, alias, index):
""" Add an alias pointing to the specified index.
Args:
alias (str): The alias that should point to the given index.
index (int): The index of the dataset for which an alias should be added.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset.
""" |
if index >= len(self._datasets):
raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))
self._aliases[alias] = index |
<SYSTEM_TASK:>
Merge all datasets into a single dataset.
<END_TASK>
<USER_TASK:>
Description:
def flatten(self, in_place=True):
""" Merge all datasets into a single dataset.
The default dataset is the last dataset to be merged, as it is considered to be
the primary source of information and should overwrite all existing fields with
the same key.
Args:
in_place (bool): Set to ``True`` to replace the existing datasets with the
merged one. If set to ``False``, will return a new MultiTaskData
object containing the merged dataset.
Returns:
MultiTaskData: If the in_place flag is set to False.
""" |
new_dataset = TaskData()
for i, dataset in enumerate(self._datasets):
if i != self._default_index:
new_dataset.merge(dataset)
new_dataset.merge(self.default_dataset)
# point all aliases to the new, single dataset
new_aliases = {alias: 0 for alias, _ in self._aliases.items()}
# replace existing datasets or return a new MultiTaskData object
if in_place:
self._datasets = [new_dataset]
self._aliases = new_aliases
self._default_index = 0
else:
return MultiTaskData(dataset=new_dataset, aliases=list(new_aliases.keys())) |
<SYSTEM_TASK:>
Set the default dataset by its alias.
<END_TASK>
<USER_TASK:>
Description:
def set_default_by_alias(self, alias):
""" Set the default dataset by its alias.
After changing the default dataset, all calls without explicitly specifying the
dataset by index or alias will be redirected to this dataset.
Args:
alias (str): The alias of the dataset that should be made the default.
Raises:
DataInvalidAlias: If the alias does not represent a valid dataset.
""" |
if alias not in self._aliases:
raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))
self._default_index = self._aliases[alias] |
<SYSTEM_TASK:>
Set the default dataset by its index.
<END_TASK>
<USER_TASK:>
Description:
def set_default_by_index(self, index):
""" Set the default dataset by its index.
After changing the default dataset, all calls without explicitly specifying the
dataset by index or alias will be redirected to this dataset.
Args:
index (int): The index of the dataset that should be made the default.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset.
""" |
if index >= len(self._datasets):
raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))
self._default_index = index |
<SYSTEM_TASK:>
Return a dataset by its alias.
<END_TASK>
<USER_TASK:>
Description:
def get_by_alias(self, alias):
""" Return a dataset by its alias.
Args:
alias (str): The alias of the dataset that should be returned.
Raises:
DataInvalidAlias: If the alias does not represent a valid dataset.
""" |
if alias not in self._aliases:
raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias))
return self.get_by_index(self._aliases[alias]) |
<SYSTEM_TASK:>
Return a dataset by its index.
<END_TASK>
<USER_TASK:>
Description:
def get_by_index(self, index):
""" Return a dataset by its index.
Args:
index (int): The index of the dataset that should be returned.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset.
""" |
if index >= len(self._datasets):
raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))
return self._datasets[index] |
<SYSTEM_TASK:>
Return the task context content as a dictionary.
<END_TASK>
<USER_TASK:>
Description:
def to_dict(self):
""" Return the task context content as a dictionary. """ |
return {
'task_name': self.task_name,
'dag_name': self.dag_name,
'workflow_name': self.workflow_name,
'workflow_id': self.workflow_id,
'worker_hostname': self.worker_hostname
} |
<SYSTEM_TASK:>
Stop a worker process.
<END_TASK>
<USER_TASK:>
Description:
def stop_worker(config, *, worker_ids=None):
""" Stop a worker process.
Args:
config (Config): Reference to the configuration object from which the
settings for the worker are retrieved.
worker_ids (list): An optional list of ids for the worker that should be stopped.
""" |
if worker_ids is not None and not isinstance(worker_ids, list):
worker_ids = [worker_ids]
celery_app = create_app(config)
celery_app.control.shutdown(destination=worker_ids) |
<SYSTEM_TASK:>
Return a list of all available workers.
<END_TASK>
<USER_TASK:>
Description:
def list_workers(config, *, filter_by_queues=None):
""" Return a list of all available workers.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
filter_by_queues (list): Restrict the returned workers to workers that listen to
at least one of the queue names in this list.
Returns:
list: A list of WorkerStats objects.
""" |
celery_app = create_app(config)
worker_stats = celery_app.control.inspect().stats()
queue_stats = celery_app.control.inspect().active_queues()
if worker_stats is None:
return []
workers = []
for name, w_stat in worker_stats.items():
queues = [QueueStats.from_celery(q_stat) for q_stat in queue_stats[name]]
add_worker = filter_by_queues is None
if not add_worker:
for queue in queues:
if queue.name in filter_by_queues:
add_worker = True
break
if add_worker:
workers.append(WorkerStats.from_celery(name, w_stat, queues))
return workers |
<SYSTEM_TASK:>
Return a new object in which callable parameters have been evaluated.
<END_TASK>
<USER_TASK:>
Description:
def eval(self, data, data_store, *, exclude=None):
""" Return a new object in which callable parameters have been evaluated.
Native types are not touched and simply returned, while callable methods are
executed and their return value is returned.
Args:
data (MultiTaskData): The data object that has been passed from the
predecessor task.
data_store (DataStore): The persistent data store object that allows the task
to store data for access across the current workflow
run.
exclude (list): List of key names as strings that should be excluded from
the evaluation.
Returns:
TaskParameters: A new TaskParameters object with the callable parameters
replaced by their return value.
""" |
exclude = [] if exclude is None else exclude
result = {}
for key, value in self.items():
if key in exclude:
continue
if value is not None and callable(value):
result[key] = value(data, data_store)
else:
result[key] = value
return TaskParameters(result) |
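The evaluation rule can be reproduced in a few standalone lines: callables are invoked with (data, data_store) and everything else passes through unchanged. The parameter names and data below are made up.

def eval_params(params, data, data_store, exclude=()):
    return {key: value(data, data_store) if callable(value) else value
            for key, value in params.items() if key not in exclude}

params = {'filename': lambda data, store: data['basename'] + '.h5',
          'compression': 4}
print(eval_params(params, {'basename': 'scan_001'}, None))
# {'filename': 'scan_001.h5', 'compression': 4}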
<SYSTEM_TASK:>
Evaluate the value of a single parameter, taking callables into account.
<END_TASK>
<USER_TASK:>
Description:
def eval_single(self, key, data, data_store):
""" Evaluate the value of a single parameter taking into account callables .
Native types are not touched and simply returned, while callable methods are
executed and their return value is returned.
Args:
key (str): The name of the parameter that should be evaluated.
data (MultiTaskData): The data object that has been passed from the
predecessor task.
data_store (DataStore): The persistent data store object that allows the task
to store data for access across the current workflow
run.
""" |
if key in self:
value = self[key]
if value is not None and callable(value):
return value(data, data_store)
else:
return value
else:
raise AttributeError() |
<SYSTEM_TASK:>
Copy the complete lookup data into redis. Old data will be overwritten.
<END_TASK>
<USER_TASK:>
Description:
def copy_data_in_redis(self, redis_prefix, redis_instance):
"""
Copy the complete lookup data into redis. Old data will be overwritten.
Args:
redis_prefix (str): Prefix to distinguish the data in redis for the different lookup types
redis_instance (redis.Redis): An instance of the Redis client
Returns:
bool: returns True when the data has been copied successfully into Redis
Example:
Copy the entire lookup data from the Country-files.com PLIST file into Redis. This example requires a running
instance of Redis, as well as the Python Redis connector (pip install redis-py).
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> print my_lookuplib.copy_data_in_redis(redis_prefix="CF", redis_instance=r)
True
Now let's create an instance of LookupLib, using Redis to query the data
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile", redis_instance=r, redis_prefix="CF")
>>> my_lookuplib.lookup_callsign("3D2RI")
{
u'adif': 460,
u'continent': u'OC',
u'country': u'Rotuma Island',
u'cqz': 32,
u'ituz': 56,
u'latitude': -12.48,
u'longitude': 177.08
}
Note:
This method is available for the following lookup types:
- clublogxml
- countryfile
""" |
if redis_instance is not None:
self._redis = redis_instance
if self._redis is None:
raise AttributeError("redis_instance is missing")
if redis_prefix is None:
raise KeyError("redis_prefix is missing")
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
self._push_dict_to_redis(self._entities, redis_prefix, "_entity_")
self._push_dict_index_to_redis(self._callsign_exceptions_index, redis_prefix, "_call_ex_index_")
self._push_dict_to_redis(self._callsign_exceptions, redis_prefix, "_call_ex_")
self._push_dict_index_to_redis(self._prefixes_index, redis_prefix, "_prefix_index_")
self._push_dict_to_redis(self._prefixes, redis_prefix, "_prefix_")
self._push_dict_index_to_redis(self._invalid_operations_index, redis_prefix, "_inv_op_index_")
self._push_dict_to_redis(self._invalid_operations, redis_prefix, "_inv_op_")
self._push_dict_index_to_redis(self._zone_exceptions_index, redis_prefix, "_zone_ex_index_")
self._push_dict_to_redis(self._zone_exceptions, redis_prefix, "_zone_ex_")
return True |
<SYSTEM_TASK:>
Returns lookup data of an ADIF Entity
<END_TASK>
<USER_TASK:>
Description:
def lookup_entity(self, entity=None):
"""Returns lookup data of an ADIF Entity
Args:
entity (int): ADIF identifier of country
Returns:
dict: Dictionary containing the country specific data
Raises:
KeyError: No matching entity found
Example:
The following code queries the Clublog XML database for the ADIF entity Turkmenistan, which has
the id 273.
>>> from pyhamtools import LookupLib
>>> my_lookuplib = LookupLib(lookuptype="clublogapi", apikey="myapikey")
>>> print my_lookuplib.lookup_entity(273)
{
'deleted': False,
'country': u'TURKMENISTAN',
'longitude': 58.4,
'cqz': 17,
'prefix': u'EZ',
'latitude': 38.0,
'continent': u'AS'
}
Note:
This method is available for the following lookup types:
- clublogxml
- redis
- qrz.com
""" |
if self._lookuptype == "clublogxml":
entity = int(entity)
if entity in self._entities:
return self._strip_metadata(self._entities[entity])
else:
raise KeyError
elif self._lookuptype == "redis":
if self._redis_prefix is None:
raise KeyError ("redis_prefix is missing")
#entity = str(entity)
json_data = self._redis.get(self._redis_prefix + "_entity_" + str(entity))
if json_data is not None:
my_dict = self._deserialize_data(json_data)
return self._strip_metadata(my_dict)
elif self._lookuptype == "qrz":
result = self._lookup_qrz_dxcc(entity, self._apikey)
return result
# no matching case
raise KeyError |
<SYSTEM_TASK:>
Create a copy of dict and remove not needed data
<END_TASK>
<USER_TASK:>
Description:
def _strip_metadata(self, my_dict):
"""
Create a copy of dict and remove not needed data
""" |
new_dict = copy.deepcopy(my_dict)
if const.START in new_dict:
del new_dict[const.START]
if const.END in new_dict:
del new_dict[const.END]
if const.WHITELIST in new_dict:
del new_dict[const.WHITELIST]
if const.WHITELIST_START in new_dict:
del new_dict[const.WHITELIST_START]
if const.WHITELIST_END in new_dict:
del new_dict[const.WHITELIST_END]
return new_dict |
<SYSTEM_TASK:>
Returns lookup data if an exception exists for a callsign
<END_TASK>
<USER_TASK:>
Description:
def lookup_callsign(self, callsign=None, timestamp=timestamp_now):
"""
Returns lookup data if an exception exists for a callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the callsign
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code queries the online Clublog API for the callsign "VK9XO" on a specific date.
>>> from pyhamtools import LookupLib
>>> from datetime import datetime
>>> import pytz
>>> my_lookuplib = LookupLib(lookuptype="clublogapi", apikey="myapikey")
>>> timestamp = datetime(year=1962, month=7, day=7, tzinfo=pytz.UTC)
>>> print my_lookuplib.lookup_callsign("VK9XO", timestamp)
{
'country': u'CHRISTMAS ISLAND',
'longitude': 105.7,
'cqz': 29,
'adif': 35,
'latitude': -10.5,
'continent': u'OC'
}
Note:
This method is available for
- clublogxml
- clublogapi
- countryfile
- qrz.com
- redis
""" |
callsign = callsign.strip().upper()
if self._lookuptype == "clublogapi":
callsign_data = self._lookup_clublogAPI(callsign=callsign, timestamp=timestamp, apikey=self._apikey)
if callsign_data[const.ADIF]==1000:
raise KeyError
else:
return callsign_data
elif self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
return self._check_data_for_date(callsign, timestamp, self._callsign_exceptions, self._callsign_exceptions_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_call_ex_", "_call_ex_index_", self._redis_prefix, callsign)
return self._check_data_for_date(callsign, timestamp, data_dict, index)
elif self._lookuptype == "qrz":
    return self._lookup_qrz_callsign(callsign, self._apikey, self._apiv)
# no matching case
raise KeyError("unknown Callsign") |
<SYSTEM_TASK:>
Retrieve the data of an item from redis and put it in an index and data dictionary to match the
<END_TASK>
<USER_TASK:>
Description:
def _get_dicts_from_redis(self, name, index_name, redis_prefix, item):
"""
Retrieve the data of an item from redis and put it in an index and data dictionary to match the
common query interface.
""" |
r = self._redis
data_dict = {}
data_index_dict = {}
if redis_prefix is None:
raise KeyError ("redis_prefix is missing")
if r.scard(redis_prefix + index_name + str(item)) > 0:
data_index_dict[str(item)] = r.smembers(redis_prefix + index_name + str(item))
for i in data_index_dict[item]:
json_data = r.get(redis_prefix + name + str(int(i)))
data_dict[i] = self._deserialize_data(json_data)
return (data_dict, data_index_dict)
raise KeyError ("No Data found in Redis for "+ item) |
<SYSTEM_TASK:>
Checks if the item is found in the index. An entry in the index points to the data
<END_TASK>
<USER_TASK:>
Description:
def _check_data_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
Checks if the item is found in the index. An entry in the index points to the data
in the data_dict. This is mainly used to retrieve callsigns and prefixes.
In case data is found for item, a dict containing the data is returned. Otherwise a KeyError is raised.
""" |
if item in data_index_dict:
for item in data_index_dict[item]:
# startdate < timestamp
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.START]
return item_data
# enddate > timestamp
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.END]
return item_data
# startdate > timestamp > enddate
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
item_data = copy.deepcopy(data_dict[item])
del item_data[const.START]
del item_data[const.END]
return item_data
# no startdate or enddate available
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return data_dict[item]
raise KeyError |
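The four branches above implement a single rule: a record matches when the timestamp lies inside its optional validity window. A compact standalone sketch of that rule, using made-up keys:

from datetime import datetime
import pytz

def in_validity_window(record, timestamp, start_key='start', end_key='end'):
    # reject records whose start date is not before, or end date not after, the timestamp
    if start_key in record and not record[start_key] < timestamp:
        return False
    if end_key in record and not record[end_key] > timestamp:
        return False
    return True

record = {'start': datetime(2010, 1, 1, tzinfo=pytz.UTC)}
print(in_validity_window(record, datetime(2012, 7, 7, tzinfo=pytz.UTC)))   # True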
<SYSTEM_TASK:>
Checks if the callsign is marked as an invalid operation for a given timestamp.
<END_TASK>
<USER_TASK:>
Description:
def _check_inv_operation_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
Checks if the callsign is marked as an invalid operation for a given timestamp.
In case the operation is invalid, True is returned. Otherwise a KeyError is raised.
""" |
if item in data_index_dict:
for item in data_index_dict[item]:
# startdate < timestamp
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
return True
# enddate > timestamp
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
return True
# startdate > timestamp > enddate
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
return True
# no startdate or enddate available
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return True
raise KeyError |
<SYSTEM_TASK:>
Returns lookup data of a Prefix
<END_TASK>
<USER_TASK:>
Description:
def lookup_prefix(self, prefix, timestamp=timestamp_now):
"""
Returns lookup data of a Prefix
Args:
prefix (string): Prefix of an Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the Prefix
Raises:
KeyError: No matching Prefix found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code shows how to obtain the information for the prefix "DH" from the countryfile.com
database (default database).
>>> from pyhamtools import LookupLib
>>> myLookupLib = LookupLib()
>>> print myLookupLib.lookup_prefix("DH")
{
'adif': 230,
'country': u'Fed. Rep. of Germany',
'longitude': 10.0,
'cqz': 14,
'ituz': 28,
'latitude': 51.0,
'continent': u'EU'
}
Note:
This method is available for
- clublogxml
- countryfile
- redis
""" |
prefix = prefix.strip().upper()
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
return self._check_data_for_date(prefix, timestamp, self._prefixes, self._prefixes_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_prefix_", "_prefix_index_", self._redis_prefix, prefix)
return self._check_data_for_date(prefix, timestamp, data_dict, index)
# no matching case
raise KeyError |
<SYSTEM_TASK:>
Returns True if an operation is known to be invalid
<END_TASK>
<USER_TASK:>
Description:
def is_invalid_operation(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)):
"""
Returns True if an operation is known to be invalid
Args:
callsign (string): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
bool: True if a record exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks the Clublog XML database if the operation is valid for two dates.
>>> from pyhamtools import LookupLib
>>> from datetime import datetime
>>> import pytz
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.is_invalid_operation("5W1CFN")
True
>>> try:
>>> timestamp = datetime(year=2012, month=1, day=31).replace(tzinfo=pytz.UTC)
>>> my_lookuplib.is_invalid_operation("5W1CFN", timestamp)
>>> except KeyError:
>>> print "Seems to be invalid operation before 31.1.2012"
Seems to be an invalid operation before 31.1.2012
Note:
This method is available for
- clublogxml
- redis
""" |
callsign = callsign.strip().upper()
if self._lookuptype == "clublogxml":
return self._check_inv_operation_for_date(callsign, timestamp, self._invalid_operations, self._invalid_operations_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_inv_op_", "_inv_op_index_", self._redis_prefix, callsign)
return self._check_inv_operation_for_date(callsign, timestamp, data_dict, index)
#no matching case
raise KeyError |
<SYSTEM_TASK:>
Checks the index and data for a CQ-zone exception for the callsign
<END_TASK>
<USER_TASK:>
Description:
def _check_zone_exception_for_date(self, item, timestamp, data_dict, data_index_dict):
"""
Checks the index and data for a CQ-zone exception for the callsign.
When a zone exception is found, the zone is returned. If no exception is found
a KeyError is raised
""" |
if item in data_index_dict:
for item in data_index_dict[item]:
# startdate < timestamp
if const.START in data_dict[item] and not const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp:
return data_dict[item][const.CQZ]
# enddate > timestamp
elif not const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.END] > timestamp:
return data_dict[item][const.CQZ]
# startdate < timestamp < enddate
elif const.START in data_dict[item] and const.END in data_dict[item]:
if data_dict[item][const.START] < timestamp \
and data_dict[item][const.END] > timestamp:
return data_dict[item][const.CQZ]
# no startdate or enddate available
elif not const.START in data_dict[item] and not const.END in data_dict[item]:
return data_dict[item][const.CQZ]
raise KeyError |
<SYSTEM_TASK:>
Returns a CQ Zone if an exception exists for the given callsign
<END_TASK>
<USER_TASK:>
Description:
def lookup_zone_exception(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)):
"""
Returns a CQ Zone if an exception exists for the given callsign
Args:
callsign (string): Amateur radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
int: Value of the CQ Zone exception which exists for this callsign (at the given time)
Raises:
KeyError: No matching callsign found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code checks the Clublog XML database if a CQ Zone exception exists for the callsign DP0GVN.
>>> from pyhamtools import LookupLib
>>> my_lookuplib = LookupLib(lookuptype="clublogxml", apikey="myapikey")
>>> print my_lookuplib.lookup_zone_exception("DP0GVN")
38
The prefix "DP" It is assigned to Germany, but the station is located in Antarctica, and therefore
in CQ Zone 38
Note:
This method is available for
- clublogxml
- redis
""" |
callsign = callsign.strip().upper()
if self._lookuptype == "clublogxml":
return self._check_zone_exception_for_date(callsign, timestamp, self._zone_exceptions, self._zone_exceptions_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_zone_ex_", "_zone_ex_index_", self._redis_prefix, callsign)
return self._check_zone_exception_for_date(callsign, timestamp, data_dict, index)
#no matching case
raise KeyError |
<SYSTEM_TASK:>
Extract the header of the Clublog XML File
<END_TASK>
<USER_TASK:>
Description:
def _extract_clublog_header(self, cty_xml_filename):
"""
Extract the header of the Clublog XML File
""" |
cty_header = {}
try:
with open(cty_xml_filename, "r") as cty:
raw_header = cty.readline()
cty_date = re.search("date='.+'", raw_header)
if cty_date:
cty_date = cty_date.group(0).replace("date=", "").replace("'", "")
cty_date = datetime.strptime(cty_date[:19], '%Y-%m-%dT%H:%M:%S')
cty_date = cty_date.replace(tzinfo=UTC)
cty_header["Date"] = cty_date
cty_ns = re.search("xmlns='.+[']", raw_header)
if cty_ns:
cty_ns = cty_ns.group(0).replace("xmlns=", "").replace("'", "")
cty_header['NameSpace'] = cty_ns
if len(cty_header) == 2:
self._logger.debug("Header successfully retrieved from CTY File")
elif len(cty_header) < 2:
self._logger.warning("Header could only be partically retrieved from CTY File")
self._logger.warning("Content of Header: ")
for key in cty_header:
self._logger.warning(str(key)+": "+str(cty_header[key]))
return cty_header
except Exception as e:
self._logger.error("Clublog CTY File could not be opened / modified")
self._logger.error("Error Message: " + str(e))
return |
<SYSTEM_TASK:>
remove the header of the Clublog XML File to make it
<END_TASK>
<USER_TASK:>
Description:
def _remove_clublog_xml_header(self, cty_xml_filename):
"""
remove the header of the Clublog XML File to make it
properly parseable for the python ElementTree XML parser
""" |
import tempfile
try:
with open(cty_xml_filename, "r") as f:
content = f.readlines()
cty_dir = tempfile.gettempdir()
cty_name = os.path.split(cty_xml_filename)[1]
cty_xml_filename_no_header = os.path.join(cty_dir, "NoHeader_"+cty_name)
with open(cty_xml_filename_no_header, "w") as f:
f.writelines("<clublog>\n\r")
f.writelines(content[1:])
self._logger.debug("Header successfully modified for XML Parsing")
return cty_xml_filename_no_header
except Exception as e:
self._logger.error("Clublog CTY could not be opened / modified")
self._logger.error("Error Message: " + str(e))
return |
<SYSTEM_TASK:>
Return the names of all public callable attributes of the given objects
<END_TASK>
<USER_TASK:>
Description:
def get_methods(*objs):
""" Return the names of all callable attributes of an object""" |
return set(
attr
for obj in objs
for attr in dir(obj)
if not attr.startswith('_') and callable(getattr(obj, attr))
) |
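A minimal usage sketch (the arguments are ordinary built-in objects, chosen only for illustration):

    # Collect the public callable attribute names of a list and a dict.
    methods = get_methods([], {})
    print(sorted(methods))   # e.g. ['append', 'clear', 'copy', ..., 'update', 'values']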
<SYSTEM_TASK:>
Create a new Config object from a configuration file.
<END_TASK>
<USER_TASK:>
Description:
def from_file(cls, filename, *, strict=True):
""" Create a new Config object from a configuration file.
Args:
filename (str): The location and name of the configuration file.
strict (bool): If true raises a ConfigLoadError when the configuration
cannot be found.
Returns:
An instance of the Config class.
Raises:
ConfigLoadError: If the configuration cannot be found.
""" |
config = cls()
config.load_from_file(filename, strict=strict)
return config |
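A minimal usage sketch; the file path is hypothetical and assumes a YAML configuration file in the format the class expects:

    # With strict=False a missing file silently falls back to the default configuration.
    config = Config.from_file('/path/to/lightflow.cfg', strict=False)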
<SYSTEM_TASK:>
Load the configuration from a file.
<END_TASK>
<USER_TASK:>
Description:
def load_from_file(self, filename=None, *, strict=True):
""" Load the configuration from a file.
The location of the configuration file can either be specified directly in the
parameter filename or is searched for in the following order:
1. In the environment variable given by LIGHTFLOW_CONFIG_ENV
2. In the current execution directory
3. In the user's home directory
Args:
filename (str): The location and name of the configuration file.
strict (bool): If true raises a ConfigLoadError when the configuration
cannot be found.
Raises:
ConfigLoadError: If the configuration cannot be found.
""" |
self.set_to_default()
if filename:
self._update_from_file(filename)
else:
if LIGHTFLOW_CONFIG_ENV not in os.environ:
if os.path.isfile(os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME)):
self._update_from_file(
os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME))
elif os.path.isfile(expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME))):
self._update_from_file(
expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME)))
else:
if strict:
raise ConfigLoadError('Could not find the configuration file.')
else:
self._update_from_file(expand_env_var(os.environ[LIGHTFLOW_CONFIG_ENV]))
self._update_python_paths() |
<SYSTEM_TASK:>
Load the configuration from a dictionary.
<END_TASK>
<USER_TASK:>
Description:
def load_from_dict(self, conf_dict=None):
""" Load the configuration from a dictionary.
Args:
conf_dict (dict): Dictionary with the configuration.
""" |
self.set_to_default()
self._update_dict(self._config, conf_dict)
self._update_python_paths() |
<SYSTEM_TASK:>
Helper method to update an existing configuration with the values from a file.
<END_TASK>
<USER_TASK:>
Description:
def _update_from_file(self, filename):
""" Helper method to update an existing configuration with the values from a file.
Loads a configuration file and replaces all values in the existing configuration
dictionary with the values from the file.
Args:
filename (str): The path and name to the configuration file.
""" |
if os.path.exists(filename):
try:
with open(filename, 'r') as config_file:
yaml_dict = yaml.safe_load(config_file.read())
if yaml_dict is not None:
self._update_dict(self._config, yaml_dict)
except IsADirectoryError:
raise ConfigLoadError(
'The specified configuration file is a directory not a file')
else:
raise ConfigLoadError('The config file {} does not exist'.format(filename)) |
<SYSTEM_TASK:>
Recursively merges the fields for two dictionaries.
<END_TASK>
<USER_TASK:>
Description:
def _update_dict(self, to_dict, from_dict):
""" Recursively merges the fields for two dictionaries.
Args:
to_dict (dict): The dictionary onto which the merge is executed.
from_dict (dict): The dictionary merged into to_dict
""" |
for key, value in from_dict.items():
if key in to_dict and isinstance(to_dict[key], dict) and \
isinstance(from_dict[key], dict):
self._update_dict(to_dict[key], from_dict[key])
else:
to_dict[key] = from_dict[key] |
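To illustrate the recursive merge, a small hedged example (config stands for any instance of this class; the keys and values are invented):

    to_dict = {'celery': {'broker_url': 'redis://localhost', 'pool': 4}, 'name': 'default'}
    from_dict = {'celery': {'pool': 8}, 'extra': True}
    config._update_dict(to_dict, from_dict)
    # to_dict is now:
    # {'celery': {'broker_url': 'redis://localhost', 'pool': 8}, 'name': 'default', 'extra': True}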
<SYSTEM_TASK:>
Append the workflow and libraries paths to the PYTHONPATH.
<END_TASK>
<USER_TASK:>
Description:
def _update_python_paths(self):
""" Append the workflow and libraries paths to the PYTHONPATH. """ |
for path in self._config['workflows'] + self._config['libraries']:
if os.path.isdir(os.path.abspath(path)):
if path not in sys.path:
sys.path.append(path)
else:
raise ConfigLoadError(
'Workflow directory {} does not exist'.format(path)) |
<SYSTEM_TASK:>
Chop Line from DX-Cluster into pieces and return a dict with the spot data
<END_TASK>
<USER_TASK:>
Description:
def decode_char_spot(raw_string):
"""Chop Line from DX-Cluster into pieces and return a dict with the spot data""" |
data = {}
# Spotter callsign
if re.match('[A-Za-z0-9\/]+[:$]', raw_string[6:15]):
data[const.SPOTTER] = re.sub(':', '', re.match('[A-Za-z0-9\/]+[:$]', raw_string[6:15]).group(0))
else:
raise ValueError
if re.search('[0-9\.]{5,12}', raw_string[10:25]):
data[const.FREQUENCY] = float(re.search('[0-9\.]{5,12}', raw_string[10:25]).group(0))
else:
raise ValueError
data[const.DX] = re.sub('[^A-Za-z0-9\/]+', '', raw_string[26:38])
data[const.COMMENT] = re.sub('[^\sA-Za-z0-9\.,;\#\+\-!\?\$\(\)@\/]+', ' ', raw_string[39:69]).strip()
data[const.TIME] = datetime.now().replace(tzinfo=UTC)
return data |
<SYSTEM_TASK:>
Decode PC11 message, which usually contains DX Spots
<END_TASK>
<USER_TASK:>
Description:
def decode_pc11_message(raw_string):
"""Decode PC11 message, which usually contains DX Spots""" |
data = {}
spot = raw_string.split("^")
data[const.FREQUENCY] = float(spot[1])
data[const.DX] = spot[2]
data[const.TIME] = datetime.fromtimestamp(mktime(strptime(spot[3]+" "+spot[4][:-1], "%d-%b-%Y %H%M")))
data[const.COMMENT] = spot[5]
data[const.SPOTTER] = spot[6]
data["node"] = spot[7]
data["raw_spot"] = raw_string
return data |
<SYSTEM_TASK:>
Decode PC23 Message which usually contains WCY
<END_TASK>
<USER_TASK:>
Description:
def decode_pc23_message(raw_string):
""" Decode PC23 Message which usually contains WCY """ |
data = {}
wcy = raw_string.split("^")
data[const.R] = int(wcy[1])
data[const.expk] = int(wcy[2])
data[const.CALLSIGN] = wcy[3]
data[const.A] = wcy[4]
data[const.SFI] = wcy[5]
data[const.K] = wcy[6]
data[const.AURORA] = wcy[7]
data["node"] = wcy[7]
data["ip"] = wcy[8]
data["raw_data"] = raw_string
return data |
<SYSTEM_TASK:>
The internal run method that decorates the public run method.
<END_TASK>
<USER_TASK:>
Description:
def _run(self, data, store, signal, context, *,
success_callback=None, stop_callback=None, abort_callback=None):
""" The internal run method that decorates the public run method.
This method makes sure data is being passed to and from the task.
Args:
data (MultiTaskData): The data object that has been passed from the
predecessor task.
store (DataStoreDocument): The persistent data store object that allows the
task to store data for access across the current
workflow run.
signal (TaskSignal): The signal object for tasks. It wraps the construction
and sending of signals into easy to use methods.
context (TaskContext): The context in which the tasks runs.
success_callback: This function is called when the task completed successfully
stop_callback: This function is called when a StopTask exception was raised.
abort_callback: This function is called when an AbortWorkflow exception
was raised.
Raises:
TaskReturnActionInvalid: If the return value of the task is not
an Action object.
Returns:
Action: An Action object containing the data that should be passed on
to the next task and optionally a list of successor tasks that
should be executed.
""" |
if data is None:
data = MultiTaskData()
data.add_dataset(self._name)
try:
if self._callback_init is not None:
self._callback_init(data, store, signal, context)
result = self.run(data, store, signal, context)
if self._callback_finally is not None:
self._callback_finally(TaskStatus.Success, data, store, signal, context)
if success_callback is not None:
success_callback()
# the task should be stopped and optionally all successor tasks skipped
except StopTask as err:
if self._callback_finally is not None:
self._callback_finally(TaskStatus.Stopped, data, store, signal, context)
if stop_callback is not None:
stop_callback(exc=err)
result = Action(data, limit=[]) if err.skip_successors else None
# the workflow should be stopped immediately
except AbortWorkflow as err:
if self._callback_finally is not None:
self._callback_finally(TaskStatus.Aborted, data, store, signal, context)
if abort_callback is not None:
abort_callback(exc=err)
result = None
signal.stop_workflow()
# catch any other exception, call the finally callback, then re-raise
except:
if self._callback_finally is not None:
self._callback_finally(TaskStatus.Error, data, store, signal, context)
signal.stop_workflow()
raise
# handle the returned data (either implicitly or as a returned Action object) by
# flattening all, possibly modified, input datasets in the MultiTask data down to
# a single output dataset.
if result is None:
data.flatten(in_place=True)
data.add_task_history(self.name)
return Action(data)
else:
if not isinstance(result, Action):
raise TaskReturnActionInvalid()
result.data.flatten(in_place=True)
result.data.add_task_history(self.name)
return result |
<SYSTEM_TASK:>
converts WGS84 coordinates into the corresponding Maidenhead Locator
<END_TASK>
<USER_TASK:>
Description:
def latlong_to_locator (latitude, longitude):
"""converts WGS84 coordinates into the corresponding Maidenhead Locator
Args:
latitude (float): Latitude
longitude (float): Longitude
Returns:
string: Maidenhead locator
Raises:
ValueError: When called with wrong or invalid input args
TypeError: When args are non float values
Example:
The following example converts latitude and longitude into the Maidenhead locator
>>> from pyhamtools.locator import latlong_to_locator
>>> latitude = 48.5208333
>>> longitude = 9.375
>>> latlong_to_locator(latitude, longitude)
'JN48QM'
Note:
Latitude (negative = South, positive = North)
Longitude (negative = West, positive = East)
""" |
if longitude >= 180 or longitude <= -180:
raise ValueError
if latitude >= 90 or latitude <= -90:
raise ValueError
longitude += 180
latitude += 90
locator = chr(ord('A') + int(longitude / 20))
locator += chr(ord('A') + int(latitude / 10))
locator += chr(ord('0') + int((longitude % 20) / 2))
locator += chr(ord('0') + int(latitude % 10))
locator += chr(ord('A') + int((longitude - int(longitude / 2) * 2) / (2 / 24)))
locator += chr(ord('A') + int((latitude - int(latitude / 1) * 1 ) / (1 / 24)))
return locator |
<SYSTEM_TASK:>
converts a Maidenhead locator into the corresponding WGS84 coordinates
<END_TASK>
<USER_TASK:>
Description:
def locator_to_latlong (locator):
"""converts Maidenhead locator in the corresponding WGS84 coordinates
Args:
locator (string): Locator, either 4 or 6 characters
Returns:
tuple (float, float): Latitude, Longitude
Raises:
ValueError: When called with wrong or invalid input arg
TypeError: When arg is not a string
Example:
The following example converts a Maidenhead locator into Latitude and Longitude
>>> from pyhamtools.locator import locator_to_latlong
>>> latitude, longitude = locator_to_latlong("JN48QM")
>>> print latitude, longitude
48.5208333333 9.375
Note:
Latitude (negative = South, positive = North)
Longitude (negative = West, positive = East)
""" |
locator = locator.upper()
if len(locator) == 5 or len(locator) < 4:
raise ValueError
if ord(locator[0]) > ord('R') or ord(locator[0]) < ord('A'):
raise ValueError
if ord(locator[1]) > ord('R') or ord(locator[1]) < ord('A'):
raise ValueError
if ord(locator[2]) > ord('9') or ord(locator[2]) < ord('0'):
raise ValueError
if ord(locator[3]) > ord('9') or ord(locator[3]) < ord('0'):
raise ValueError
if len(locator) == 6:
if ord(locator[4]) > ord('X') or ord(locator[4]) < ord('A'):
raise ValueError
if ord (locator[5]) > ord('X') or ord(locator[5]) < ord('A'):
raise ValueError
longitude = (ord(locator[0]) - ord('A')) * 20 - 180
latitude = (ord(locator[1]) - ord('A')) * 10 - 90
longitude += (ord(locator[2]) - ord('0')) * 2
latitude += (ord(locator[3]) - ord('0'))
if len(locator) == 6:
longitude += ((ord(locator[4])) - ord('A')) * (2 / 24)
latitude += ((ord(locator[5])) - ord('A')) * (1 / 24)
# move to center of subsquare
longitude += 1 / 24
latitude += 0.5 / 24
else:
# move to center of square
longitude += 1
latitude += 0.5
return latitude, longitude |
<SYSTEM_TASK:>
calculates the heading from the first to the second locator
<END_TASK>
<USER_TASK:>
Description:
def calculate_heading(locator1, locator2):
"""calculates the heading from the first to the second locator
Args:
locator1 (string): Locator, either 4 or 6 characters
locator2 (string): Locator, either 4 or 6 characters
Returns:
float: Heading in deg
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the heading from locator1 to locator2
>>> from pyhamtools.locator import calculate_heading
>>> calculate_heading("JN48QM", "QF67bf")
74.3136
""" |
lat1, long1 = locator_to_latlong(locator1)
lat2, long2 = locator_to_latlong(locator2)
r_lat1 = radians(lat1)
r_lon1 = radians(long1)
r_lat2 = radians(lat2)
r_lon2 = radians(long2)
d_lon = radians(long2 - long1)
b = atan2(sin(d_lon)*cos(r_lat2),cos(r_lat1)*sin(r_lat2)-sin(r_lat1)*cos(r_lat2)*cos(d_lon)) # bearing calc
bd = degrees(b)
br,bn = divmod(bd+360,360) # the bearing remainder and final bearing
return bn |
<SYSTEM_TASK:>
calculates the next sunset and sunrise for a Maidenhead locator at a given date & time
<END_TASK>
<USER_TASK:>
Description:
def calculate_sunrise_sunset(locator, calc_date=datetime.utcnow()):
"""calculates the next sunset and sunrise for a Maidenhead locator at a give date & time
Args:
locator (string): Maidenhead Locator, either 4 or 6 characters
calc_date (datetime, optional): Starting datetime for the calculations (UTC)
Returns:
dict: Containing datetimes for morning_dawn, sunrise, evening_dawn, sunset
Raises:
ValueError: When called with wrong or invalid input arg
AttributeError: When args are not a string
Example:
The following calculates the next sunrise & sunset for JN48QM on the 1./Jan/2014
>>> from pyhamtools.locator import calculate_sunrise_sunset
>>> from datetime import datetime
>>> import pytz
>>> UTC = pytz.UTC
>>> myDate = datetime(year=2014, month=1, day=1, tzinfo=UTC)
>>> calculate_sunrise_sunset("JN48QM", myDate)
{
'morning_dawn': datetime.datetime(2014, 1, 1, 6, 36, 51, 710524, tzinfo=<UTC>),
'sunset': datetime.datetime(2014, 1, 1, 16, 15, 23, 31016, tzinfo=<UTC>),
'evening_dawn': datetime.datetime(2014, 1, 1, 15, 38, 8, 355315, tzinfo=<UTC>),
'sunrise': datetime.datetime(2014, 1, 1, 7, 14, 6, 162063, tzinfo=<UTC>)
}
""" |
morning_dawn = None
sunrise = None
evening_dawn = None
sunset = None
latitude, longitude = locator_to_latlong(locator)
if type(calc_date) != datetime:
raise ValueError
sun = ephem.Sun()
home = ephem.Observer()
home.lat = str(latitude)
home.long = str(longitude)
home.date = calc_date
sun.compute(home)
try:
nextrise = home.next_rising(sun)
nextset = home.next_setting(sun)
home.horizon = '-6'
beg_twilight = home.next_rising(sun, use_center=True)
end_twilight = home.next_setting(sun, use_center=True)
morning_dawn = beg_twilight.datetime()
sunrise = nextrise.datetime()
evening_dawn = nextset.datetime()
sunset = end_twilight.datetime()
#if sun never sets or rises (e.g. at polar circles)
except ephem.AlwaysUpError as e:
morning_dawn = None
sunrise = None
evening_dawn = None
sunset = None
except ephem.NeverUpError as e:
morning_dawn = None
sunrise = None
evening_dawn = None
sunset = None
result = {}
result['morning_dawn'] = morning_dawn
result['sunrise'] = sunrise
result['evening_dawn'] = evening_dawn
result['sunset'] = sunset
if morning_dawn:
result['morning_dawn'] = morning_dawn.replace(tzinfo=UTC)
if sunrise:
result['sunrise'] = sunrise.replace(tzinfo=UTC)
if evening_dawn:
result['evening_dawn'] = evening_dawn.replace(tzinfo=UTC)
if sunset:
result['sunset'] = sunset.replace(tzinfo=UTC)
return result |
<SYSTEM_TASK:>
Encode Python objects into a byte stream using cloudpickle.
<END_TASK>
<USER_TASK:>
Description:
def cloudpickle_dumps(obj, dumper=cloudpickle.dumps):
""" Encode Python objects into a byte stream using cloudpickle. """ |
return dumper(obj, protocol=serialization.pickle_protocol) |
<SYSTEM_TASK:>
Monkey patch Celery to use cloudpickle instead of pickle.
<END_TASK>
<USER_TASK:>
Description:
def patch_celery():
""" Monkey patch Celery to use cloudpickle instead of pickle. """ |
registry = serialization.registry
serialization.pickle = cloudpickle
registry.unregister('pickle')
registry.register('pickle', cloudpickle_dumps, cloudpickle_loads,
content_type='application/x-python-serialize',
content_encoding='binary')
import celery.worker as worker
import celery.concurrency.asynpool as asynpool
worker.state.pickle = cloudpickle
asynpool._pickle = cloudpickle
import billiard.common
billiard.common.pickle = cloudpickle
billiard.common.pickle_dumps = cloudpickle_dumps
billiard.common.pickle_loads = cloudpickle_loads |
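A hedged usage sketch: the patch is applied before the Celery application is created so that task payloads are (de)serialized with cloudpickle; the app name and broker URL below are illustrative only.

    from celery import Celery

    patch_celery()
    app = Celery('lightflow', broker='redis://localhost:6379/0')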
<SYSTEM_TASK:>
Connects to the redis database.
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
""" Connects to the redis database. """ |
self._connection = StrictRedis(
host=self._host,
port=self._port,
db=self._database,
password=self._password) |
<SYSTEM_TASK:>
Returns a single request.
<END_TASK>
<USER_TASK:>
Description:
def receive(self):
""" Returns a single request.
Takes the first request from the list of requests and returns it. If the list
is empty, None is returned.
Returns:
Request: If a new request is available a Request object is returned,
otherwise None is returned.
""" |
pickled_request = self._connection.connection.lpop(self._request_key)
return pickle.loads(pickled_request) if pickled_request is not None else None |
<SYSTEM_TASK:>
Send a response back to the client that issued a request.
<END_TASK>
<USER_TASK:>
Description:
def send(self, response):
""" Send a response back to the client that issued a request.
Args:
response (Response): Reference to the response object that should be sent.
""" |
self._connection.connection.set('{}:{}'.format(SIGNAL_REDIS_PREFIX, response.uid),
pickle.dumps(response)) |
<SYSTEM_TASK:>
Push the request back onto the queue.
<END_TASK>
<USER_TASK:>
Description:
def restore(self, request):
""" Push the request back onto the queue.
Args:
request (Request): Reference to a request object that should be pushed back
onto the request queue.
""" |
self._connection.connection.rpush(self._request_key, pickle.dumps(request)) |
<SYSTEM_TASK:>
Send a request to the server and wait for its response.
<END_TASK>
<USER_TASK:>
Description:
def send(self, request):
""" Send a request to the server and wait for its response.
Args:
request (Request): Reference to a request object that is sent to the server.
Returns:
Response: The response from the server to the request.
""" |
self._connection.connection.rpush(self._request_key, pickle.dumps(request))
resp_key = '{}:{}'.format(SIGNAL_REDIS_PREFIX, request.uid)
while True:
if self._connection.polling_time > 0.0:
sleep(self._connection.polling_time)
response_data = self._connection.connection.get(resp_key)
if response_data is not None:
self._connection.connection.delete(resp_key)
break
return pickle.loads(response_data) |
<SYSTEM_TASK:>
Verifies whether a pattern for matching and finding fulfills the expected structure.
<END_TASK>
<USER_TASK:>
Description:
def verify_pattern(pattern):
"""Verifies if pattern for matching and finding fulfill expected structure.
:param pattern: string pattern to verify
:return: True if pattern has proper syntax, False otherwise
""" |
regex = re.compile("^!?[a-zA-Z]+$|[*]{1,2}$")
def __verify_pattern__(__pattern__):
if not __pattern__:
return False
elif __pattern__[0] == "!":
return __verify_pattern__(__pattern__[1:])
elif __pattern__[0] == "[" and __pattern__[-1] == "]":
return all(__verify_pattern__(p) for p in __pattern__[1:-1].split(","))
else:
return regex.match(__pattern__)
return all(__verify_pattern__(p) for p in pattern.split("/")) |
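A few illustrative calls (the labels are invented; only the syntax matters here):

    print(verify_pattern("chase/nsubj"))          # True: plain alphabetic labels
    print(verify_pattern("chase/!nsubj"))         # True: a label may be negated with '!'
    print(verify_pattern("chase/[nsubj,dobj]"))   # True: alternatives in brackets
    print(verify_pattern("**/dobj"))              # True: '*' and '**' wildcards are allowed
    print(verify_pattern("chase/nsubj-of"))       # False: '-' is not a valid character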
<SYSTEM_TASK:>
Matches given sentence with provided pattern.
<END_TASK>
<USER_TASK:>
Description:
def match_tree(sentence, pattern):
"""Matches given sentence with provided pattern.
:param sentence: sentence from Spacy(see: http://spacy.io/docs/#doc-spans-sents) representing complete statement
:param pattern: pattern to which sentence will be compared
:return: True if sentence match to pattern, False otherwise
:raises: PatternSyntaxException: if pattern has wrong syntax
""" |
if not verify_pattern(pattern):
raise PatternSyntaxException(pattern)
def _match_node(t, p):
pat_node = p.pop(0) if p else ""
return not pat_node or (_match_token(t, pat_node, False) and _match_edge(t.children,p))
def _match_edge(edges,p):
pat_edge = p.pop(0) if p else ""
if not pat_edge:
return True
elif not edges:
return False
else:
for t in edges:
if _match_token(t, pat_edge, True) and _match_node(t, list(p)):
return True
elif pat_edge == "**" and _match_edge(t.children, ["**"] + p):
return True
return False
return _match_node(sentence.root, pattern.split("/")) |
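A hedged usage sketch: it requires spaCy and an installed English model (the model name is an assumption), and the result depends on how _match_token, which is defined elsewhere in the library, compares tokens and dependency labels, so the printed value is indicative only.

    import spacy

    nlp = spacy.load("en_core_web_sm")
    doc = nlp("dogs chase cats")
    sentence = next(doc.sents)
    print(match_tree(sentence, "chase/nsubj"))   # True if the root 'chase' has an nsubj child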
<SYSTEM_TASK:>
Split a string using a single-character delimiter
<END_TASK>
<USER_TASK:>
Description:
def split (s, delimter, trim = True, limit = 0): # pragma: no cover
"""
Split a string using a single-character delimiter
@params:
`s`: the string
`delimter`: the single-character delimiter
`trim`: whether to trim each part. Default: True
`limit`: the maximum number of splits to perform (0 means no limit). Default: 0
@examples:
```python
ret = split("'a,b',c", ",")
# ret == ["'a,b'", "c"]
# ',' inside quotes will be recognized.
```
@returns:
The list of substrings
""" |
ret = []
special1 = ['(', ')', '[', ']', '{', '}']
special2 = ['\'', '"']
special3 = '\\'
flags1 = [0, 0, 0]
flags2 = [False, False]
flags3 = False
start = 0
nlim = 0
for i, c in enumerate(s):
if c == special3:
# next char is escaped
flags3 = not flags3
elif not flags3:
# no escape
if c in special1:
index = special1.index(c)
if index % 2 == 0:
flags1[int(index/2)] += 1
else:
flags1[int(index/2)] -= 1
elif c in special2:
index = special2.index(c)
flags2[index] = not flags2[index]
elif c == delimter and not any(flags1) and not any(flags2):
r = s[start:i]
if trim: r = r.strip()
ret.append(r)
start = i + 1
nlim = nlim + 1
if limit and nlim >= limit:
break
else:
# escaping closed
flags3 = False
r = s[start:]
if trim: r = r.strip()
ret.append(r)
return ret |
<SYSTEM_TASK:>
Render this template by applying it to `context`.
<END_TASK>
<USER_TASK:>
Description:
def render(self, **context):
"""
Render this template by applying it to `context`.
@params:
`context`: a dictionary of values to use in this rendering.
@returns:
The rendered string
""" |
# Make the complete context we'll use.
localns = self.envs.copy()
localns.update(context)
try:
exec(str(self.code), None, localns)
return localns[Liquid.COMPLIED_RENDERED_STR]
except Exception:
stacks = list(reversed(traceback.format_exc().splitlines()))
for stack in stacks:
stack = stack.strip()
if stack.startswith('File "<string>"'):
lineno = int(stack.split(', ')[1].split()[-1])
source = []
if 'NameError:' in stacks[0]:
source.append('Did you forget to provide the data?')
import math
source.append('\nCompiled source (use debug mode to see full source):')
source.append('---------------------------------------------------')
nlines = len(self.code.codes)
nbit = int(math.log(nlines, 10)) + 3
for i, line in enumerate(self.code.codes):
if i - 7 > lineno or i + 9 < lineno: continue
if i + 1 != lineno:
source.append(' ' + (str(i+1) + '.').ljust(nbit) + str(line).rstrip())
else:
source.append('* ' + (str(i+1) + '.').ljust(nbit) + str(line).rstrip())
raise LiquidRenderError(
stacks[0],
repr(self.code.codes[lineno - 1]) +
'\n' + '\n'.join(source) +
'\n\nPREVIOUS EXCEPTION:\n------------------\n' +
'\n'.join(stacks) + '\n' +
'\nCONTEXT:\n------------------\n' +
'\n'.join(
' ' + key + ': ' + str(val)
for key, val in localns.items() if not key.startswith('_liquid_') and not key.startswith('__')
) + '\n'
)
raise |
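A minimal usage sketch, assuming the liquidpy API of the version this code is taken from (the import path and constructor signature may differ between versions):

    from liquid import Liquid

    tpl = Liquid('Hello {{name}}!')
    print(tpl.render(name='world'))   # -> 'Hello world!'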
<SYSTEM_TASK:>
Add a line of source to the code.
<END_TASK>
<USER_TASK:>
Description:
def addLine(self, line):
"""
Add a line of source to the code.
Indentation and newline will be added for you, don't provide them.
@params:
`line`: The line to add
""" |
if not isinstance(line, LiquidLine):
line = LiquidLine(line)
line.ndent = self.ndent
self.codes.append(line) |
<SYSTEM_TASK:>
Fetch currency conversion rate from the database
<END_TASK>
<USER_TASK:>
Description:
def get_rate_from_db(currency: str) -> Decimal:
"""
Fetch currency conversion rate from the database
""" |
from .models import ConversionRate
try:
rate = ConversionRate.objects.get_rate(currency)
except ConversionRate.DoesNotExist: # noqa
raise ValueError('No conversion rate for %s' % (currency, ))
return rate.rate |
<SYSTEM_TASK:>
Get conversion rate to use in exchange
<END_TASK>
<USER_TASK:>
Description:
def get_conversion_rate(from_currency: str, to_currency: str) -> Decimal:
"""
Get conversion rate to use in exchange
""" |
reverse_rate = False
if to_currency == BASE_CURRENCY:
# Fetch exchange rate for base currency and use 1 / rate for conversion
rate_currency = from_currency
reverse_rate = True
else:
rate_currency = to_currency
rate = get_rate_from_db(rate_currency)
if reverse_rate:
conversion_rate = Decimal(1) / rate
else:
conversion_rate = rate
return conversion_rate |
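The reverse-rate branch simply inverts the stored rate. A worked illustration with the database lookup stubbed out (the currency codes and rate are invented, and BASE_CURRENCY is assumed to be 'USD'):

    from decimal import Decimal

    rate_eur = Decimal('0.9')            # stored rate: 1 USD = 0.9 EUR
    usd_to_eur = rate_eur                # get_conversion_rate('USD', 'EUR') uses the rate directly
    eur_to_usd = Decimal(1) / rate_eur   # get_conversion_rate('EUR', 'USD') uses 1 / rate
    print(usd_to_eur, eur_to_usd)        # 0.9 1.111...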
<SYSTEM_TASK:>
Calculates precipitation statistics for the cascade model while aggregating hourly observations
<END_TASK>
<USER_TASK:>
Description:
def calc_precipitation_stats(self, months=None, avg_stats=True, percentile=50):
"""
Calculates precipitation statistics for the cascade model while aggregating hourly observations
Parameters
----------
months : Months for each season to be used for the statistics (list of numpy arrays; default is all twelve months as a single season, i.e. [np.arange(12) + 1])
avg_stats : average statistics for all levels True/False (default=True)
percentile : percentile for splitting the dataset into small and high intensities (default=50)
""" |
if months is None:
months = [np.arange(12) + 1]
self.precip.months = months
self.precip.stats = melodist.build_casc(self.data, months=months, avg_stats=avg_stats, percentile=percentile) |