Columns: text_prompt (string, 157 to 13.1k characters); code_prompt (string, 7 to 19.8k characters)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_ituz(self, callsign, timestamp=timestamp_now): """ Returns the ITU Zone of a callsign Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: int: the callsign's ITU Zone Raises: KeyError: No ITU Zone found for callsign Note: Currently, only the Country-files.com lookup database contains ITU Zones """
return self.get_all(callsign, timestamp)[const.ITUZ]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_country_name(self, callsign, timestamp=timestamp_now): """ Returns the country name where the callsign is located Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: str: name of the country Raises: KeyError: No Country found for callsign Note: Don't rely on the country name when working with several instances of :py:class:`Callinfo`. Clublog and Country-files.com use slightly different names for countries. Example: - Country-files.com: "Fed. Rep. of Germany" - Clublog: "FEDERAL REPUBLIC OF GERMANY" """
return self.get_all(callsign, timestamp)[const.COUNTRY]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_adif_id(self, callsign, timestamp=timestamp_now): """ Returns ADIF id of a callsign's country Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: int: containing the country ADIF id Raises: KeyError: No Country found for callsign """
return self.get_all(callsign, timestamp)[const.ADIF]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_continent(self, callsign, timestamp=timestamp_now): """ Returns the continent identifier of a callsign Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: str: continent identifier Raises: KeyError: No Continent found for callsign Note: The following continent identifiers are used: - EU: Europe - NA: North America - SA: South America - AS: Asia - AF: Africa - OC: Oceania - AN: Antarctica """
return self.get_all(callsign, timestamp)[const.CONTINENT]
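A brief usage sketch for the lookup helpers above, assuming the pyhamtools API (LookupLib and Callinfo) that these methods belong to; the callsign and the returned values are illustrative only:

from pyhamtools import LookupLib, Callinfo

my_lookuplib = LookupLib(lookuptype="countryfile")  # Country-files.com data
cic = Callinfo(my_lookuplib)

cic.get_ituz("DH1TW")          # e.g. 28
cic.get_country_name("DH1TW")  # e.g. "Fed. Rep. of Germany"
cic.get_adif_id("DH1TW")       # e.g. 230
cic.get_continent("DH1TW")     # e.g. "EU"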
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_indices(lst, element): """ Returns the indices for all occurrences of 'element' in 'lst'. Args: lst (list): List to search. element: Element to find. Returns: list: List of indices (empty if 'element' does not occur). """
result = [] offset = -1 while True: try: offset = lst.index(element, offset+1) except ValueError: return result result.append(offset)
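For example, searching a short list:

find_indices([1, 2, 1, 3, 1], 1)   # [0, 2, 4]
find_indices(["a", "b"], "z")      # []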
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_name(cls, name, *, queue=DefaultJobQueueName.Workflow, clear_data_store=True, arguments=None): """ Create a workflow object from a workflow script. Args: name (str): The name of the workflow script. queue (str): Name of the queue the workflow should be scheduled to. clear_data_store (bool): Remove any documents created during the workflow run in the data store after the run. arguments (dict): Dictionary of additional arguments that are ingested into the data store prior to the execution of the workflow. Returns: Workflow: A fully initialised workflow object """
new_workflow = cls(queue=queue, clear_data_store=clear_data_store) new_workflow.load(name, arguments=arguments) return new_workflow
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, name, *, arguments=None, validate_arguments=True, strict_dag=False): """ Import the workflow script and load all known objects. The workflow script is treated like a module and imported into the Python namespace. After the import, the method looks for instances of known classes and stores a reference for further use in the workflow object. Args: name (str): The name of the workflow script. arguments (dict): Dictionary of additional arguments that are ingested into the data store prior to the execution of the workflow. validate_arguments (bool): Whether to check that all required arguments have been supplied. strict_dag (bool): If true then the loaded workflow module must contain an instance of Dag. Raises: WorkflowArgumentError: If the workflow requires arguments to be set that were not supplied to the workflow. WorkflowImportError: If the import of the workflow fails. """
arguments = {} if arguments is None else arguments try: workflow_module = importlib.import_module(name) dag_present = False # extract objects of specific types from the workflow module for key, obj in workflow_module.__dict__.items(): if isinstance(obj, Dag): self._dags_blueprint[obj.name] = obj dag_present = True elif isinstance(obj, Parameters): self._parameters.extend(obj) self._name = name self._docstring = inspect.getdoc(workflow_module) del sys.modules[name] if strict_dag and not dag_present: raise WorkflowImportError( 'Workflow does not include a dag {}'.format(name)) if validate_arguments: missing_parameters = self._parameters.check_missing(arguments) if len(missing_parameters) > 0: raise WorkflowArgumentError( 'The following parameters are required ' + 'by the workflow, but are missing: {}'.format( ', '.join(missing_parameters))) self._provided_arguments = arguments except (TypeError, ImportError): logger.error('Cannot import workflow {}'.format(name)) raise WorkflowImportError('Cannot import workflow {}'.format(name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self, config, data_store, signal_server, workflow_id): """ Run all autostart dags in the workflow. Only the dags that are flagged as autostart are started. Args: config (Config): Reference to the configuration object from which the settings for the workflow are retrieved. data_store (DataStore): A DataStore object that is fully initialised and connected to the persistent data storage. signal_server (Server): A signal Server object that receives requests from dags and tasks. workflow_id (str): A unique workflow id that represents this workflow run """
self._workflow_id = workflow_id self._celery_app = create_app(config) # pre-fill the data store with supplied arguments args = self._parameters.consolidate(self._provided_arguments) for key, value in args.items(): data_store.get(self._workflow_id).set(key, value) # start all dags with the autostart flag set to True for name, dag in self._dags_blueprint.items(): if dag.autostart: self._queue_dag(name) # as long as there are dags in the list keep running while self._dags_running: if config.workflow_polling_time > 0.0: sleep(config.workflow_polling_time) # handle new requests from dags, tasks and the library (e.g. cli, web) for i in range(MAX_SIGNAL_REQUESTS): request = signal_server.receive() if request is None: break try: response = self._handle_request(request) if response is not None: signal_server.send(response) else: signal_server.restore(request) except (RequestActionUnknown, RequestFailed): signal_server.send(Response(success=False, uid=request.uid)) # remove any dags and their result data that finished running for name, dag in list(self._dags_running.items()): if dag.ready(): if self._celery_app.conf.result_expires == 0: dag.forget() del self._dags_running[name] elif dag.failed(): self._stop_workflow = True # remove the signal entry signal_server.clear() # delete all entries in the data_store under this workflow id, if requested if self._clear_data_store: data_store.remove(self._workflow_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _queue_dag(self, name, *, data=None): """ Add a new dag to the queue. If the stop workflow flag is set, no new dag can be queued. Args: name (str): The name of the dag that should be queued. data (MultiTaskData): The data that should be passed on to the new dag. Raises: DagNameUnknown: If the specified dag name does not exist Returns: str: The name of the queued dag. """
if self._stop_workflow: return None if name not in self._dags_blueprint: raise DagNameUnknown() new_dag = copy.deepcopy(self._dags_blueprint[name]) new_dag.workflow_name = self.name self._dags_running[new_dag.name] = self._celery_app.send_task( JobExecPath.Dag, args=(new_dag, self._workflow_id, data), queue=new_dag.queue, routing_key=new_dag.queue) return new_dag.name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _handle_request(self, request): """ Handle an incoming request by forwarding it to the appropriate method. Args: request (Request): Reference to a request object containing the incoming request. Raises: RequestActionUnknown: If the action specified in the request is not known. Returns: Response: A response object containing the response from the method handling the request. """
if request is None: return Response(success=False, uid=request.uid) action_map = { 'start_dag': self._handle_start_dag, 'stop_workflow': self._handle_stop_workflow, 'join_dags': self._handle_join_dags, 'stop_dag': self._handle_stop_dag, 'is_dag_stopped': self._handle_is_dag_stopped } if request.action in action_map: return action_map[request.action](request) else: raise RequestActionUnknown()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _handle_start_dag(self, request): """ The handler for the start_dag request. The start_dag request creates a new dag and adds it to the queue. Args: request (Request): Reference to a request object containing the incoming request. The payload has to contain the following fields: 'name': the name of the dag that should be started 'data': the data that is passed onto the start tasks Returns: Response: A response object containing the following fields: - dag_name: The name of the started dag. """
dag_name = self._queue_dag(name=request.payload['name'], data=request.payload['data']) return Response(success=dag_name is not None, uid=request.uid, payload={'dag_name': dag_name})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _handle_stop_workflow(self, request): """ The handler for the stop_workflow request. The stop_workflow request adds all running dags to the list of dags that should be stopped and prevents new dags from being started. The dags will then stop queueing new tasks, which will terminate the dags and in turn the workflow. Args: request (Request): Reference to a request object containing the incoming request. Returns: Response: A response object containing the following fields: - success: True if the dags were added successfully to the list of dags that should be stopped. """
self._stop_workflow = True for name, dag in self._dags_running.items(): if name not in self._stop_dags: self._stop_dags.append(name) return Response(success=True, uid=request.uid)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _handle_join_dags(self, request): """ The handler for the join_dags request. If dag names are given in the payload only return a valid Response if none of the dags specified by the names are running anymore. If no dag names are given, wait for all dags except one, which by design is the one that issued the request, to be finished. Args: request (Request): Reference to a request object containing the incoming request. Returns: Response: A response object containing the following fields: - success: True if all dags the request was waiting for have completed. """
if request.payload['names'] is None: send_response = len(self._dags_running) <= 1 else: send_response = all([name not in self._dags_running.keys() for name in request.payload['names']]) if send_response: return Response(success=True, uid=request.uid) else: return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _handle_stop_dag(self, request): """ The handler for the stop_dag request. The stop_dag request adds a dag to the list of dags that should be stopped. The dag will then stop queueing new tasks and will eventually stop running. Args: request (Request): Reference to a request object containing the incoming request. The payload has to contain the following fields: 'name': the name of the dag that should be stopped Returns: Response: A response object containing the following fields: - success: True if the dag was added successfully to the list of dags that should be stopped. """
if (request.payload['name'] is not None) and \ (request.payload['name'] not in self._stop_dags): self._stop_dags.append(request.payload['name']) return Response(success=True, uid=request.uid)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _handle_is_dag_stopped(self, request): """ The handler for the dag_stopped request. The dag_stopped request checks whether a dag is flagged to be terminated. Args: request (Request): Reference to a request object containing the incoming request. The payload has to contain the following fields: 'dag_name': the name of the dag that should be checked Returns: Response: A response object containing the following fields: - is_stopped: True if the dag is flagged to be stopped. """
return Response(success=True, uid=request.uid, payload={ 'is_stopped': request.payload['dag_name'] in self._stop_dags })
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop(self, consumer): """ This function is called when the worker received a request to terminate. Upon the termination of the worker, the workflows for all running jobs are stopped gracefully. Args: consumer (Consumer): Reference to the consumer object that handles messages from the broker. """
stopped_workflows = [] for request in [r for r in consumer.controller.state.active_requests]: job = AsyncResult(request.id) workflow_id = job.result['workflow_id'] if workflow_id not in stopped_workflows: client = Client( SignalConnection(**consumer.app.user_options['config'].signal, auto_connect=True), request_key=workflow_id) client.send(Request(action='stop_workflow')) stopped_workflows.append(workflow_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_dag(self, dag, *, data=None): """ Schedule the execution of a dag by sending a signal to the workflow. Args: dag (Dag, str): The dag object or the name of the dag that should be started. data (MultiTaskData): The data that should be passed on to the new dag. Returns: str: The name of the successfully started dag. """
return self._client.send( Request( action='start_dag', payload={'name': dag.name if isinstance(dag, Dag) else dag, 'data': data if isinstance(data, MultiTaskData) else None} ) ).payload['dag_name']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def join_dags(self, names=None): """ Wait for the specified dags to terminate. This function blocks until the specified dags terminate. If no dags are specified, wait for all dags of the workflow, except the dag of the task calling this signal, to terminate. Args: names (list): The names of the dags that have to terminate. Returns: bool: True if the signal was sent successfully. """
return self._client.send( Request( action='join_dags', payload={'names': names} ) ).success
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop_dag(self, name=None): """ Send a stop signal to the specified dag or the dag that hosts this task. Args: name (str): The name of the dag that should be stopped. If no name is given, the dag that hosts this task is stopped. Upon receiving the stop signal, the dag will not queue any new tasks and will wait for running tasks to terminate. Returns: bool: True if the signal was sent successfully. """
return self._client.send( Request( action='stop_dag', payload={'name': name if name is not None else self._dag_name} ) ).success
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_stopped(self): """ Check whether the task received a stop signal from the workflow. Tasks can use the stop flag to gracefully terminate their work. This is particularly important for long running tasks and tasks that employ an infinite loop, such as trigger tasks. Returns: bool: True if the task should be stopped. """
resp = self._client.send( Request( action='is_dag_stopped', payload={'dag_name': self._dag_name} ) ) return resp.payload['is_stopped']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def event_stream(app, *, filter_by_prefix=None): """ Generator function that returns celery events. This function turns the callback-based celery event handling into a generator. Args: app: Reference to a celery application object. filter_by_prefix (str): If not None, only allow events whose type starts with this prefix to yield a generator event. Returns: generator: A generator that returns celery events. """
q = Queue() def handle_event(event): if filter_by_prefix is None or\ (filter_by_prefix is not None and event['type'].startswith(filter_by_prefix)): q.put(event) def receive_events(): with app.connection() as connection: recv = app.events.Receiver(connection, handlers={ '*': handle_event }) recv.capture(limit=None, timeout=None, wakeup=True) t = threading.Thread(target=receive_events) t.start() while True: yield q.get(block=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_event_model(event): """ Factory function that turns a celery event into an event object. Args: event (dict): A dictionary that represents a celery event. Returns: object: An event object representing the received event. Raises: JobEventTypeUnsupported: If an unsupported celery job event was received. WorkerEventTypeUnsupported: If an unsupported celery worker event was received. EventTypeUnknown: If an unknown event type (neither job nor worker) was received. """
if event['type'].startswith('task'): factory = { JobEventName.Started: JobStartedEvent, JobEventName.Succeeded: JobSucceededEvent, JobEventName.Stopped: JobStoppedEvent, JobEventName.Aborted: JobAbortedEvent } if event['type'] in factory: return factory[event['type']].from_event(event) else: raise JobEventTypeUnsupported( 'Unsupported event type {}'.format(event['type'])) elif event['type'].startswith('worker'): raise WorkerEventTypeUnsupported( 'Unsupported event type {}'.format(event['type'])) else: raise EventTypeUnknown('Unknown event type {}'.format(event['type']))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def config_required(f): """ Decorator that checks whether a configuration file was set. """
def new_func(obj, *args, **kwargs): if 'config' not in obj: click.echo(_style(obj.get('show_color', False), 'Could not find a valid configuration file!', fg='red', bold=True)) raise click.Abort() else: return f(obj, *args, **kwargs) return update_wrapper(new_func, f)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ingest_config_obj(ctx, *, silent=True): """ Ingest the configuration object into the click context. """
try: ctx.obj['config'] = Config.from_file(ctx.obj['config_path']) except ConfigLoadError as err: click.echo(_style(ctx.obj['show_color'], str(err), fg='red', bold=True)) if not silent: raise click.Abort()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cli(ctx, config, no_color): """ Command line client for lightflow. A lightweight, high performance pipeline system for synchrotrons. Lightflow is being developed at the Australian Synchrotron. """
ctx.obj = { 'show_color': not no_color if no_color is not None else True, 'config_path': config }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def config_default(dest): """ Create a default configuration file. \b DEST: Path or file name for the configuration file. """
conf_path = Path(dest).resolve() if conf_path.is_dir(): conf_path = conf_path / LIGHTFLOW_CONFIG_NAME conf_path.write_text(Config.default()) click.echo('Configuration written to {}'.format(conf_path))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def config_list(ctx): """ List the current configuration. """
ingest_config_obj(ctx, silent=False) click.echo(json.dumps(ctx.obj['config'].to_dict(), indent=4))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def config_examples(dest, user_dir): """ Copy the example workflows to a directory. \b DEST: Path to which the examples should be copied. """
examples_path = Path(lightflow.__file__).parents[1] / 'examples' if examples_path.exists(): dest_path = Path(dest).resolve() if not user_dir: dest_path = dest_path / 'examples' if dest_path.exists(): if not click.confirm('Directory already exists. Overwrite existing files?', default=True, abort=True): return else: dest_path.mkdir() for example_file in examples_path.glob('*.py'): shutil.copy(str(example_file), str(dest_path / example_file.name)) click.echo('Copied examples to {}'.format(str(dest_path))) else: click.echo('The examples source path does not exist')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def workflow_start(obj, queue, keep_data, name, workflow_args): """ Send a workflow to the queue. \b NAME: The name of the workflow that should be started. WORKFLOW_ARGS: Workflow arguments in the form key1=value1 key2=value2. """
try: start_workflow(name=name, config=obj['config'], queue=queue, clear_data_store=not keep_data, store_args=dict([arg.split('=', maxsplit=1) for arg in workflow_args])) except (WorkflowArgumentError, WorkflowImportError) as e: click.echo(_style(obj['show_color'], 'An error occurred when trying to start the workflow', fg='red', bold=True)) click.echo('{}'.format(e)) except WorkflowDefinitionError as e: click.echo(_style(obj['show_color'], 'The graph {} in workflow {} is not a directed acyclic graph'. format(e.graph_name, e.workflow_name), fg='red', bold=True))
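The key=value pairs given on the command line are turned into the store_args dictionary with a single split per argument, so values may themselves contain '='; a standalone illustration of that parsing step, with made-up arguments:

workflow_args = ["threshold=0.5", "path=/data/run=42"]
store_args = dict(arg.split('=', maxsplit=1) for arg in workflow_args)
# {'threshold': '0.5', 'path': '/data/run=42'}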
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def workflow_stop(obj, names): """ Stop one or more running workflows. \b NAMES: The names, ids or job ids of the workflows that should be stopped. Leave empty to stop all running workflows. """
if len(names) == 0: msg = 'Would you like to stop all workflows?' else: msg = '\n{}\n\n{}'.format('\n'.join(names), 'Would you like to stop these jobs?') if click.confirm(msg, default=True, abort=True): stop_workflow(obj['config'], names=names if len(names) > 0 else None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def workflow_status(obj, details): """ Show the status of the workflows. """
show_colors = obj['show_color'] config_cli = obj['config'].cli if details: temp_form = '{:>{}} {:20} {:25} {:25} {:38} {}' else: temp_form = '{:>{}} {:20} {:25} {} {} {}' click.echo('\n') click.echo(temp_form.format( 'Status', 12, 'Name', 'Start Time', 'ID' if details else '', 'Job' if details else '', 'Arguments' )) click.echo('-' * (138 if details else 75)) def print_jobs(jobs, *, label='', color='green'): for job in jobs: start_time = job.start_time if job.start_time is not None else 'unknown' click.echo(temp_form.format( _style(show_colors, label, fg=color, bold=True), 25 if show_colors else 12, job.name, start_time.replace(tzinfo=pytz.utc).astimezone().strftime( config_cli['time_format']), job.workflow_id if details else '', job.id if details else '', ','.join(['{}={}'.format(k, v) for k, v in job.arguments.items()])) ) # running jobs print_jobs(list_jobs(config=obj['config'], status=JobStatus.Active, filter_by_type=JobType.Workflow), label='Running', color='green') # scheduled jobs print_jobs(list_jobs(config=obj['config'], status=JobStatus.Scheduled, filter_by_type=JobType.Workflow), label='Scheduled', color='blue') # registered jobs print_jobs(list_jobs(config=obj['config'], status=JobStatus.Registered, filter_by_type=JobType.Workflow), label='Registered', color='yellow') # reserved jobs print_jobs(list_jobs(config=obj['config'], status=JobStatus.Reserved, filter_by_type=JobType.Workflow), label='Reserved', color='yellow')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def worker_stop(obj, worker_ids): """ Stop running workers. \b WORKER_IDS: The IDs of the worker that should be stopped or none to stop them all. """
if len(worker_ids) == 0: msg = 'Would you like to stop all workers?' else: msg = '\n{}\n\n{}'.format('\n'.join(worker_ids), 'Would you like to stop these workers?') if click.confirm(msg, default=True, abort=True): stop_worker(obj['config'], worker_ids=list(worker_ids) if len(worker_ids) > 0 else None)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def worker_status(obj, filter_queues, details): """ Show the status of all running workers. """
show_colors = obj['show_color'] f_queues = filter_queues.split(',') if filter_queues is not None else None workers = list_workers(config=obj['config'], filter_by_queues=f_queues) if len(workers) == 0: click.echo('No workers are running at the moment.') return for ws in workers: click.echo('{} {}'.format(_style(show_colors, 'Worker:', fg='blue', bold=True), _style(show_colors, ws.name, fg='blue'))) click.echo('{:23} {}'.format(_style(show_colors, '> pid:', bold=True), ws.pid)) if details: click.echo('{:23} {}'.format(_style(show_colors, '> concurrency:', bold=True), ws.concurrency)) click.echo('{:23} {}'.format(_style(show_colors, '> processes:', bold=True), ', '.join(str(p) for p in ws.process_pids))) click.echo('{:23} {}://{}:{}/{}'.format(_style(show_colors, '> broker:', bold=True), ws.broker.transport, ws.broker.hostname, ws.broker.port, ws.broker.virtual_host)) click.echo('{:23} {}'.format(_style(show_colors, '> queues:', bold=True), ', '.join([q.name for q in ws.queues]))) if details: click.echo('{:23} {}'.format(_style(show_colors, '> job count:', bold=True), ws.job_count)) jobs = list_jobs(config=obj['config'], filter_by_worker=ws.name) click.echo('{:23} [{}]'.format(_style(show_colors, '> jobs:', bold=True), len(jobs) if len(jobs) > 0 else 'No tasks')) for job in jobs: click.echo('{:15} {} {}'.format( '', _style(show_colors, '{}'.format(job.name), bold=True, fg=JOB_COLOR[job.type]), _style(show_colors, '({}) [{}] <{}> on {}'.format( job.type, job.workflow_id, job.id, job.worker_pid), fg=JOB_COLOR[job.type]))) click.echo('\n')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def monitor(ctx, details): """ Show the worker and workflow event stream. """
ingest_config_obj(ctx, silent=False) show_colors = ctx.obj['show_color'] event_display = { JobEventName.Started: {'color': 'blue', 'label': 'started'}, JobEventName.Succeeded: {'color': 'green', 'label': 'succeeded'}, JobEventName.Stopped: {'color': 'yellow', 'label': 'stopped'}, JobEventName.Aborted: {'color': 'red', 'label': 'aborted'} } click.echo('\n') click.echo('{:>10} {:>12} {:25} {:18} {:16} {:28} {}'.format( 'Status', 'Type', 'Name', 'Duration (sec)', 'Queue' if details else '', 'Workflow ID' if details else '', 'Worker' if details else '')) click.echo('-' * (136 if details else 65)) for event in workflow_events(ctx.obj['config']): evt_disp = event_display[event.event] click.echo('{:>{}} {:>{}} {:25} {:18} {:16} {:28} {}'.format( _style(show_colors, evt_disp['label'], fg=evt_disp['color']), 20 if show_colors else 10, _style(show_colors, event.type, bold=True, fg=JOB_COLOR[event.type]), 24 if show_colors else 12, event.name, '{0:.3f}'.format(event.duration) if event.duration is not None else '', event.queue if details else '', event.workflow_id if details else '', event.hostname if details else ''))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ext(obj, ext_name, ext_args): """ Run an extension by its name. \b EXT_NAME: The name of the extension. EXT_ARGS: Arguments that are passed to the extension. """
try: mod = import_module('lightflow_{}.__main__'.format(ext_name)) mod.main(ext_args) except ImportError as err: click.echo(_style(obj['show_color'], 'An error occurred when trying to call the extension', fg='red', bold=True)) click.echo('{}'.format(err))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_app(config): """ Create a fully configured Celery application object. Args: config (Config): A reference to a lightflow configuration object. Returns: Celery: A fully configured Celery application object. """
# configure the celery logging system with the lightflow settings setup_logging.connect(partial(_initialize_logging, config), weak=False) task_postrun.connect(partial(_cleanup_workflow, config), weak=False) # patch Celery to use cloudpickle instead of pickle for serialisation patch_celery() # create the main celery app and load the configuration app = Celery('lightflow') app.conf.update(**config.celery) # overwrite user supplied settings to make sure celery works with lightflow app.conf.update( task_serializer='pickle', accept_content=['pickle'], result_serializer='pickle', task_default_queue=DefaultJobQueueName.Task ) if isinstance(app.conf.include, list): app.conf.include.extend(LIGHTFLOW_INCLUDE) else: if len(app.conf.include) > 0: raise ConfigOverwriteError( 'The content in the include config will be overwritten') app.conf.include = LIGHTFLOW_INCLUDE return app
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _cleanup_workflow(config, task_id, args, **kwargs): """ Cleanup the results of a workflow when it finished. Connects to the postrun signal of Celery. If the signal was sent by a workflow, remove the result from the result backend. Args: task_id (str): The id of the task. args (tuple): The arguments the task was started with. **kwargs: Keyword arguments from the hook. """
from lightflow.models import Workflow if isinstance(args[0], Workflow): if config.celery['result_expires'] == 0: AsyncResult(task_id).forget()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def execute_dag(self, dag, workflow_id, data=None): """ Celery task that runs a single dag on a worker. This celery task starts, manages and monitors the individual tasks of a dag. Args: self (Task): Reference to itself, the celery task object. dag (Dag): Reference to a Dag object that is being used to start, manage and monitor tasks. workflow_id (string): The unique ID of the workflow run that started this dag. data (MultiTaskData): An optional MultiTaskData object that is being passed to the first tasks in the dag. This allows the transfer of data from dag to dag. """
start_time = datetime.utcnow() logger.info('Running DAG <{}>'.format(dag.name)) store_doc = DataStore(**self.app.user_options['config'].data_store, auto_connect=True).get(workflow_id) store_loc = 'log.{}'.format(dag.name) # update data store with provenance information store_doc.set(key='{}.start_time'.format(store_loc), value=start_time, section=DataStoreDocumentSection.Meta) # send custom celery event that the dag has been started self.send_event(JobEventName.Started, job_type=JobType.Dag, name=dag.name, queue=dag.queue, time=start_time, workflow_id=workflow_id, duration=None) # store job specific meta information wth the job self.update_state(meta={'name': dag.name, 'queue': dag.queue, 'type': JobType.Dag, 'workflow_id': workflow_id}) # run the tasks in the DAG signal = DagSignal(Client(SignalConnection(**self.app.user_options['config'].signal, auto_connect=True), request_key=workflow_id), dag.name) dag.run(config=self.app.user_options['config'], workflow_id=workflow_id, signal=signal, data=data) end_time = datetime.utcnow() duration = (end_time - start_time).total_seconds() # update data store with provenance information store_doc.set(key='{}.end_time'.format(store_loc), value=end_time, section=DataStoreDocumentSection.Meta) store_doc.set(key='{}.duration'.format(store_loc), value=duration, section=DataStoreDocumentSection.Meta) # send custom celery event that the dag has succeeded event_name = JobEventName.Succeeded if not signal.is_stopped else JobEventName.Aborted self.send_event(event_name, job_type=JobType.Dag, name=dag.name, queue=dag.queue, time=end_time, workflow_id=workflow_id, duration=duration) logger.info('Finished DAG <{}>'.format(dag.name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_celery(cls, broker_dict): """ Create a BrokerStats object from the dictionary returned by celery. Args: broker_dict (dict): The dictionary as returned by celery. Returns: BrokerStats: A fully initialized BrokerStats object. """
return BrokerStats( hostname=broker_dict['hostname'], port=broker_dict['port'], transport=broker_dict['transport'], virtual_host=broker_dict['virtual_host'] )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Return a dictionary of the broker stats. Returns: dict: Dictionary of the stats. """
return { 'hostname': self.hostname, 'port': self.port, 'transport': self.transport, 'virtual_host': self.virtual_host }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_celery(cls, name, worker_dict, queues): """ Create a WorkerStats object from the dictionary returned by celery. Args: name (str): The name of the worker. worker_dict (dict): The dictionary as returned by celery. queues (list): A list of QueueStats objects that represent the queues this worker is listening on. Returns: WorkerStats: A fully initialized WorkerStats object. """
return WorkerStats( name=name, broker=BrokerStats.from_celery(worker_dict['broker']), pid=worker_dict['pid'], process_pids=worker_dict['pool']['processes'], concurrency=worker_dict['pool']['max-concurrency'], job_count=worker_dict['pool']['writes']['total'], queues=queues )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Return a dictionary of the worker stats. Returns: dict: Dictionary of the stats. """
return { 'name': self.name, 'broker': self.broker.to_dict(), 'pid': self.pid, 'process_pids': self.process_pids, 'concurrency': self.concurrency, 'job_count': self.job_count, 'queues': [q.to_dict() for q in self.queues] }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_celery(cls, worker_name, job_dict, celery_app): """ Create a JobStats object from the dictionary returned by celery. Args: worker_name (str): The name of the worker this jobs runs on. job_dict (dict): The dictionary as returned by celery. celery_app: Reference to a celery application object. Returns: JobStats: A fully initialized JobStats object. """
if not isinstance(job_dict, dict) or 'id' not in job_dict: raise JobStatInvalid('The job description is missing important fields.') async_result = AsyncResult(id=job_dict['id'], app=celery_app) a_info = async_result.info if isinstance(async_result.info, dict) else None return JobStats( name=a_info.get('name', '') if a_info is not None else '', job_id=job_dict['id'], job_type=a_info.get('type', '') if a_info is not None else '', workflow_id=a_info.get('workflow_id', '') if a_info is not None else '', queue=a_info.get('queue', '') if a_info is not None else '', start_time=a_info.get('start_time', None) if a_info is not None else None, arguments=a_info.get('arguments', {}) if a_info is not None else {}, acknowledged=job_dict['acknowledged'], func_name=job_dict['type'], hostname=job_dict['hostname'], worker_name=worker_name, worker_pid=job_dict['worker_pid'], routing_key=job_dict['delivery_info']['routing_key'] )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Return a dictionary of the job stats. Returns: dict: Dictionary of the stats. """
return { 'name': self.name, 'id': self.id, 'type': self.type, 'workflow_id': self.workflow_id, 'queue': self.queue, 'start_time': self.start_time, 'arguments': self.arguments, 'acknowledged': self.acknowledged, 'func_name': self.func_name, 'hostname': self.hostname, 'worker_name': self.worker_name, 'worker_pid': self.worker_pid, 'routing_key': self.routing_key }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_event(cls, event): """ Create a JobEvent object from the event dictionary returned by celery. Args: event (dict): The dictionary as returned by celery. Returns: JobEvent: A fully initialized JobEvent object. """
return cls( uuid=event['uuid'], job_type=event['job_type'], event_type=event['type'], queue=event['queue'], hostname=event['hostname'], pid=event['pid'], name=event['name'], workflow_id=event['workflow_id'], event_time=event['time'], duration=event['duration'] )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start_workflow(name, config, *, queue=DefaultJobQueueName.Workflow, clear_data_store=True, store_args=None): """ Start a single workflow by sending it to the workflow queue. Args: name (str): The name of the workflow that should be started. Refers to the name of the workflow file without the .py extension. config (Config): Reference to the configuration object from which the settings for the workflow are retrieved. queue (str): Name of the queue the workflow should be scheduled to. clear_data_store (bool): Remove any documents created during the workflow run in the data store after the run. store_args (dict): Dictionary of additional arguments that are ingested into the data store prior to the execution of the workflow. Returns: str: The ID of the workflow job. Raises: WorkflowArgumentError: If the workflow requires arguments to be set in store_args that were not supplied to the workflow. WorkflowImportError: If the import of the workflow fails. """
try: wf = Workflow.from_name(name, queue=queue, clear_data_store=clear_data_store, arguments=store_args) except DirectedAcyclicGraphInvalid as e: raise WorkflowDefinitionError(workflow_name=name, graph_name=e.graph_name) celery_app = create_app(config) result = celery_app.send_task(JobExecPath.Workflow, args=(wf,), queue=queue, routing_key=queue) return result.id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop_workflow(config, *, names=None): """ Stop one or more workflows. Args: config (Config): Reference to the configuration object from which the settings for the workflow are retrieved. names (list): List of workflow names, workflow ids or workflow job ids for the workflows that should be stopped. If all workflows should be stopped, set it to None. Returns: tuple: A tuple of the workflow jobs that were successfully stopped and the ones that could not be stopped. """
jobs = list_jobs(config, filter_by_type=JobType.Workflow) if names is not None: filtered_jobs = [] for job in jobs: if (job.id in names) or (job.name in names) or (job.workflow_id in names): filtered_jobs.append(job) else: filtered_jobs = jobs success = [] failed = [] for job in filtered_jobs: client = Client(SignalConnection(**config.signal, auto_connect=True), request_key=job.workflow_id) if client.send(Request(action='stop_workflow')).success: success.append(job) else: failed.append(job) return success, failed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_jobs(config, *, status=JobStatus.Active, filter_by_type=None, filter_by_worker=None): """ Return a list of Celery jobs. Args: config (Config): Reference to the configuration object from which the settings are retrieved. status (JobStatus): The status of the jobs that should be returned. filter_by_type (list): Restrict the returned jobs to the types in this list. filter_by_worker (list): Only return jobs that were registered, reserved or are running on the workers given in this list of worker names. Using this option will increase the performance. Returns: list: A list of JobStats. """
celery_app = create_app(config) # option to filter by the worker (improves performance) if filter_by_worker is not None: inspect = celery_app.control.inspect( destination=filter_by_worker if isinstance(filter_by_worker, list) else [filter_by_worker]) else: inspect = celery_app.control.inspect() # get active, registered or reserved jobs if status == JobStatus.Active: job_map = inspect.active() elif status == JobStatus.Registered: job_map = inspect.registered() elif status == JobStatus.Reserved: job_map = inspect.reserved() elif status == JobStatus.Scheduled: job_map = inspect.scheduled() else: job_map = None if job_map is None: return [] result = [] for worker_name, jobs in job_map.items(): for job in jobs: try: job_stats = JobStats.from_celery(worker_name, job, celery_app) if (filter_by_type is None) or (job_stats.type == filter_by_type): result.append(job_stats) except JobStatInvalid: pass return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def events(config): """ Return a generator that yields workflow events. For every workflow event that is sent from celery this generator yields an event object. Args: config (Config): Reference to the configuration object from which the settings are retrieved. Returns: generator: A generator that returns workflow events. """
celery_app = create_app(config) for event in event_stream(celery_app, filter_by_prefix='task'): try: yield create_event_model(event) except JobEventTypeUnsupported: pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run(self): """ Drain the process output streams. """
read_stdout = partial(self._read_output, stream=self._process.stdout, callback=self._callback_stdout, output_file=self._stdout_file) read_stderr = partial(self._read_output, stream=self._process.stderr, callback=self._callback_stderr, output_file=self._stderr_file) # capture the process output as long as the process is active try: while self._process.poll() is None: result_stdout = read_stdout() result_stderr = read_stderr() if not result_stdout and not result_stderr: sleep(self._refresh_time) # read remaining lines while read_stdout(): pass while read_stderr(): pass except (StopTask, AbortWorkflow) as exc: self._exc_obj = exc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_output(self, stream, callback, output_file): """ Read the output of the process, execute the callback and save the output. Args: stream: A file object pointing to the output stream that should be read. callback (callable, None): A callback function that is called for each new line of output. output_file: A file object to which the full output is written. Returns: bool: True if a line was read from the output, otherwise False. """
if (callback is None and output_file is None) or stream.closed: return False line = stream.readline() if line: if callback is not None: callback(line.decode(), self._data, self._store, self._signal, self._context) if output_file is not None: output_file.write(line) return True else: return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_as(user, group): """ Function wrapper that sets the user and group for the process """
def wrapper(): if user is not None: os.setuid(user) if group is not None: os.setgid(group) return wrapper
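A sketch of how such a wrapper is typically handed to subprocess as a preexec_fn, assuming a POSIX system; the numeric uid/gid and the command are illustrative only:

import subprocess

# preexec_fn runs in the child process just before exec()
proc = subprocess.Popen(['id'], preexec_fn=_run_as(1000, 1000))
proc.wait()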
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def convert(self, value): """ Convert the specified value to the type of the option. Args: value: The value that should be converted. Returns: The value with the type given by the option. """
if self._type is str: return str(value) elif self._type is int: try: return int(value) except (UnicodeError, ValueError): raise WorkflowArgumentError('Cannot convert {} to int'.format(value)) elif self._type is float: try: return float(value) except (UnicodeError, ValueError): raise WorkflowArgumentError('Cannot convert {} to float'.format(value)) elif self._type is bool: if isinstance(value, bool): return bool(value) value = value.lower() if value in ('true', '1', 'yes', 'y'): return True elif value in ('false', '0', 'no', 'n'): return False raise WorkflowArgumentError('Cannot convert {} to bool'.format(value)) else: return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_missing(self, args): """ Returns the names of all options that are required but were not specified. All options that don't have a default value are required in order to run the workflow. Args: args (dict): A dictionary of the provided arguments that is checked for missing options. Returns: list: A list with the names of the options that are missing from the provided arguments. """
return [opt.name for opt in self if (opt.name not in args) and (opt.default is None)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def consolidate(self, args): """ Consolidate the provided arguments. If the provided arguments have matching options, this performs a type conversion. For any option that has a default value and is not present in the provided arguments, the default value is added. Args: args (dict): A dictionary of the provided arguments. Returns: dict: A dictionary of the type-converted arguments, enriched with the default values of any options that were not provided. """
result = dict(args) for opt in self: if opt.name in result: result[opt.name] = opt.convert(result[opt.name]) else: if opt.default is not None: result[opt.name] = opt.convert(opt.default) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self, graph): """ Validate the graph by checking whether it is a directed acyclic graph. Args: graph (DiGraph): Reference to a DiGraph object from NetworkX. Raises: DirectedAcyclicGraphInvalid: If the graph is not a valid dag. """
if not nx.is_directed_acyclic_graph(graph): raise DirectedAcyclicGraphInvalid(graph_name=self._name)
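A standalone illustration of the underlying NetworkX check, using made-up node names:

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([('extract', 'transform'), ('transform', 'load')])
nx.is_directed_acyclic_graph(g)   # True

g.add_edge('load', 'extract')     # introduce a cycle
nx.is_directed_acyclic_graph(g)   # False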
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def merge(self, dataset): """ Merge the specified dataset on top of the existing data. This replaces all values in the existing dataset with the values from the given dataset. Args: dataset (TaskData): A reference to the TaskData object that should be merged on top of the existing object. """
def merge_data(source, dest): for key, value in source.items(): if isinstance(value, dict): merge_data(value, dest.setdefault(key, {})) else: dest[key] = value return dest merge_data(dataset.data, self._data) for h in dataset.task_history: if h not in self._task_history: self._task_history.append(h)
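The recursive dictionary merge used above can be illustrated in isolation; the keys and values here are made up:

def merge_data(source, dest):
    for key, value in source.items():
        if isinstance(value, dict):
            merge_data(value, dest.setdefault(key, {}))
        else:
            dest[key] = value
    return dest

existing = {'scan': {'rate': 1, 'mode': 'fast'}, 'runs': 3}
incoming = {'scan': {'rate': 5}, 'label': 'test'}
merge_data(incoming, existing)
# {'scan': {'rate': 5, 'mode': 'fast'}, 'runs': 3, 'label': 'test'}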
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_dataset(self, task_name, dataset=None, *, aliases=None): """ Add a new dataset to the MultiTaskData. Args: task_name (str): The name of the task from which the dataset was received. dataset (TaskData): The dataset that should be added. aliases (list): A list of aliases that should be registered with the dataset. """
self._datasets.append(dataset if dataset is not None else TaskData()) last_index = len(self._datasets) - 1 self._aliases[task_name] = last_index if aliases is not None: for alias in aliases: self._aliases[alias] = last_index if len(self._datasets) == 1: self._default_index = 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_alias(self, alias, index): """ Add an alias pointing to the specified index. Args: alias (str): The alias that should point to the given index. index (int): The index of the dataset for which an alias should be added. Raises: DataInvalidIndex: If the index does not represent a valid dataset. """
if index >= len(self._datasets): raise DataInvalidIndex('A dataset with index {} does not exist'.format(index)) self._aliases[alias] = index
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def flatten(self, in_place=True): """ Merge all datasets into a single dataset. The default dataset is the last dataset to be merged, as it is considered to be the primary source of information and should overwrite all existing fields with the same key. Args: in_place (bool): Set to ``True`` to replace the existing datasets with the merged one. If set to ``False``, will return a new MultiTaskData object containing the merged dataset. Returns: MultiTaskData: If the in_place flag is set to False. """
new_dataset = TaskData() for i, dataset in enumerate(self._datasets): if i != self._default_index: new_dataset.merge(dataset) new_dataset.merge(self.default_dataset) # point all aliases to the new, single dataset new_aliases = {alias: 0 for alias, _ in self._aliases.items()} # replace existing datasets or return a new MultiTaskData object if in_place: self._datasets = [new_dataset] self._aliases = new_aliases self._default_index = 0 else: return MultiTaskData(dataset=new_dataset, aliases=list(new_aliases.keys()))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_default_by_alias(self, alias): """ Set the default dataset by its alias. After changing the default dataset, all calls without explicitly specifying the dataset by index or alias will be redirected to this dataset. Args: alias (str): The alias of the dataset that should be made the default. Raises: DataInvalidAlias: If the alias does not represent a valid dataset. """
if alias not in self._aliases: raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias)) self._default_index = self._aliases[alias]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_default_by_index(self, index): """ Set the default dataset by its index. After changing the default dataset, all calls without explicitly specifying the dataset by index or alias will be redirected to this dataset. Args: index (int): The index of the dataset that should be made the default. Raises: DataInvalidIndex: If the index does not represent a valid dataset. """
if index >= len(self._datasets): raise DataInvalidIndex('A dataset with index {} does not exist'.format(index)) self._default_index = index
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_by_alias(self, alias): """ Return a dataset by its alias. Args: alias (str): The alias of the dataset that should be returned. Raises: DataInvalidAlias: If the alias does not represent a valid dataset. """
if alias not in self._aliases: raise DataInvalidAlias('A dataset with alias {} does not exist'.format(alias)) return self.get_by_index(self._aliases[alias])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_by_index(self, index): """ Return a dataset by its index. Args: index (int): The index of the dataset that should be returned. Raises: DataInvalidIndex: If the index does not represent a valid dataset. """
if index >= len(self._datasets): raise DataInvalidIndex('A dataset with index {} does not exist'.format(index)) return self._datasets[index]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Return the task context content as a dictionary. """
return { 'task_name': self.task_name, 'dag_name': self.dag_name, 'workflow_name': self.workflow_name, 'workflow_id': self.workflow_id, 'worker_hostname': self.worker_hostname }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stop_worker(config, *, worker_ids=None): """ Stop a worker process. Args: config (Config): Reference to the configuration object from which the settings for the worker are retrieved. worker_ids (list): An optional list of ids for the worker that should be stopped. """
if worker_ids is not None and not isinstance(worker_ids, list): worker_ids = [worker_ids] celery_app = create_app(config) celery_app.control.shutdown(destination=worker_ids)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_workers(config, *, filter_by_queues=None): """ Return a list of all available workers. Args: config (Config): Reference to the configuration object from which the settings are retrieved. filter_by_queues (list): Restrict the returned workers to workers that listen to at least one of the queue names in this list. Returns: list: A list of WorkerStats objects. """
celery_app = create_app(config) worker_stats = celery_app.control.inspect().stats() queue_stats = celery_app.control.inspect().active_queues() if worker_stats is None: return [] workers = [] for name, w_stat in worker_stats.items(): queues = [QueueStats.from_celery(q_stat) for q_stat in queue_stats[name]] add_worker = filter_by_queues is None if not add_worker: for queue in queues: if queue.name in filter_by_queues: add_worker = True break if add_worker: workers.append(WorkerStats.from_celery(name, w_stat, queues)) return workers
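A hedged sketch of the two worker helpers above. The configuration file name, queue name and worker id are illustrative assumptions, and the printed representation of the WorkerStats objects is not specified by this document.

# Hypothetical sketch: list the workers serving a queue, then shut one down.
config = Config.from_file('lightflow.cfg')                  # file name is an assumption

for worker in list_workers(config, filter_by_queues=['workflow']):
    print(worker)                                           # a WorkerStats object

stop_worker(config, worker_ids=['worker1@example.host'])    # id format is an assumption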
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eval(self, data, data_store, *, exclude=None): """ Return a new object in which callable parameters have been evaluated. Native types are not touched and simply returned, while callable methods are executed and their return value is returned. Args: data (MultiTaskData): The data object that has been passed from the predecessor task. data_store (DataStore): The persistent data store object that allows the task to store data for access across the current workflow run. exclude (list): List of key names as strings that should be excluded from the evaluation. Returns: TaskParameters: A new TaskParameters object with the callable parameters replaced by their return value. """
exclude = [] if exclude is None else exclude result = {} for key, value in self.items(): if key in exclude: continue if value is not None and callable(value): result[key] = value(data, data_store) else: result[key] = value return TaskParameters(result)
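A hypothetical sketch of parameter evaluation. Constructing TaskParameters from a dict matches how the method above builds its return value; the data_store.get() call inside the lambda and the origin of 'data' and 'data_store' (normally handed to a task's run() method) are assumptions.

# Hypothetical sketch: a callable parameter is resolved at run time,
# a plain value is passed through unchanged.
params = TaskParameters({'filename': lambda data, data_store: data_store.get('filename'),
                         'threshold': 0.5})

resolved = params.eval(data, data_store)                       # 'filename' now holds the returned value
threshold = params.eval_single('threshold', data, data_store)  # plain value, returned as-is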
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eval_single(self, key, data, data_store): """ Evaluate the value of a single parameter taking into account callables . Native types are not touched and simply returned, while callable methods are executed and their return value is returned. Args: key (str): The name of the parameter that should be evaluated. data (MultiTaskData): The data object that has been passed from the predecessor task. data_store (DataStore): The persistent data store object that allows the task to store data for access across the current workflow run. """
if key in self: value = self[key] if value is not None and callable(value): return value(data, data_store) else: return value else: raise AttributeError()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy_data_in_redis(self, redis_prefix, redis_instance): """ Copy the complete lookup data into redis. Old data will be overwritten. Args: redis_prefix (str): Prefix to distinguish the data in redis for the different lookup types redis_instance (str): an instance of Redis Returns: bool: returns True when the data has been copied successfully into Redis Example: Copy the entire lookup data from the Country-files.com PLIST File into Redis. This example requires a running instance of Redis, as well as the Python Redis connector (pip install redis). True Now let's create an instance of LookupLib, using Redis to query the data { u'adif': 460, u'continent': u'OC', u'country': u'Rotuma Island', u'cqz': 32, u'ituz': 56, u'latitude': -12.48, u'longitude': 177.08 } Note: This method is available for the following lookup types - clublogxml - countryfile """
if redis_instance is not None: self._redis = redis_instance if self._redis is None: raise AttributeError("redis_instance is missing") if redis_prefix is None: raise KeyError("redis_prefix is missing") if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile": self._push_dict_to_redis(self._entities, redis_prefix, "_entity_") self._push_dict_index_to_redis(self._callsign_exceptions_index, redis_prefix, "_call_ex_index_") self._push_dict_to_redis(self._callsign_exceptions, redis_prefix, "_call_ex_") self._push_dict_index_to_redis(self._prefixes_index, redis_prefix, "_prefix_index_") self._push_dict_to_redis(self._prefixes, redis_prefix, "_prefix_") self._push_dict_index_to_redis(self._invalid_operations_index, redis_prefix, "_inv_op_index_") self._push_dict_to_redis(self._invalid_operations, redis_prefix, "_inv_op_") self._push_dict_index_to_redis(self._zone_exceptions_index, redis_prefix, "_zone_ex_index_") self._push_dict_to_redis(self._zone_exceptions, redis_prefix, "_zone_ex_") return True
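A hedged end-to-end sketch of the Redis copy described in the docstring. The LookupLib constructor arguments are assumptions not confirmed by this document; the redis.Redis() call connects to a default local instance.

# Hypothetical sketch: push the countryfile data into Redis, then query it from there.
import redis

r = redis.Redis(host='localhost', port=6379)
lib = LookupLib(lookuptype='countryfile')                     # constructor args are assumptions
lib.copy_data_in_redis(redis_prefix='CF', redis_instance=r)   # returns True on success

redis_lib = LookupLib(lookuptype='redis', redis_instance=r, redis_prefix='CF')
print(redis_lib.lookup_entity(460))                           # Rotuma Island, as in the docstring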
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lookup_entity(self, entity=None): """Returns lookup data of an ADIF Entity Args: entity (int): ADIF identifier of country Returns: dict: Dictionary containing the country specific data Raises: KeyError: No matching entity found Example: The following code queries the Clublog XML database for the ADIF entity Turkmenistan, which has the id 273. { 'deleted': False, 'country': u'TURKMENISTAN', 'longitude': 58.4, 'cqz': 17, 'prefix': u'EZ', 'latitude': 38.0, 'continent': u'AS' } Note: This method is available for the following lookup types - clublogxml - redis - qrz.com """
if self._lookuptype == "clublogxml": entity = int(entity) if entity in self._entities: return self._strip_metadata(self._entities[entity]) else: raise KeyError elif self._lookuptype == "redis": if self._redis_prefix is None: raise KeyError ("redis_prefix is missing") #entity = str(entity) json_data = self._redis.get(self._redis_prefix + "_entity_" + str(entity)) if json_data is not None: my_dict = self._deserialize_data(json_data) return self._strip_metadata(my_dict) elif self._lookuptype == "qrz": result = self._lookup_qrz_dxcc(entity, self._apikey) return result # no matching case raise KeyError
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _strip_metadata(self, my_dict): """ Create a copy of the dict and remove data that is not needed """
new_dict = copy.deepcopy(my_dict) if const.START in new_dict: del new_dict[const.START] if const.END in new_dict: del new_dict[const.END] if const.WHITELIST in new_dict: del new_dict[const.WHITELIST] if const.WHITELIST_START in new_dict: del new_dict[const.WHITELIST_START] if const.WHITELIST_END in new_dict: del new_dict[const.WHITELIST_END] return new_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lookup_callsign(self, callsign=None, timestamp=timestamp_now): """ Returns lookup data if an exception exists for a callsign Args: callsign (string): Amateur radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: dict: Dictionary containing the country specific data of the callsign Raises: KeyError: No matching callsign found APIKeyMissingError: API Key for Clublog missing or incorrect Example: The following code queries the online Clublog API for the callsign "VK9XO" on a specific date. { 'country': u'CHRISTMAS ISLAND', 'longitude': 105.7, 'cqz': 29, 'adif': 35, 'latitude': -10.5, 'continent': u'OC' } Note: This method is available for - clublogxml - clublogapi - countryfile - qrz.com - redis """
callsign = callsign.strip().upper() if self._lookuptype == "clublogapi": callsign_data = self._lookup_clublogAPI(callsign=callsign, timestamp=timestamp, apikey=self._apikey) if callsign_data[const.ADIF]==1000: raise KeyError else: return callsign_data elif self._lookuptype == "clublogxml" or self._lookuptype == "countryfile": return self._check_data_for_date(callsign, timestamp, self._callsign_exceptions, self._callsign_exceptions_index) elif self._lookuptype == "redis": data_dict, index = self._get_dicts_from_redis("_call_ex_", "_call_ex_index_", self._redis_prefix, callsign) return self._check_data_for_date(callsign, timestamp, data_dict, index) # no matching case elif self._lookuptype == "qrz": return self._lookup_qrz_callsign(callsign, self._apikey, self._apiv) raise KeyError("unknown Callsign")
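A hedged sketch of a callsign lookup, mirroring the VK9XO example in the docstring. The constructor arguments and the API key placeholder are assumptions.

# Hypothetical sketch: look up the exception record for a callsign at a given time.
from datetime import datetime
from pytz import UTC

lib = LookupLib(lookuptype='clublogapi', apikey='<your-clublog-api-key>')   # args are assumptions
try:
    print(lib.lookup_callsign('VK9XO', datetime(2013, 1, 1, tzinfo=UTC)))   # CHRISTMAS ISLAND
except KeyError:
    print('no exception record found for this callsign')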
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_dicts_from_redis(self, name, index_name, redis_prefix, item): """ Retrieve the data of an item from redis and put it in an index and data dictionary to match the common query interface. """
r = self._redis
data_dict = {}
data_index_dict = {}
if redis_prefix is None:
    raise KeyError("redis_prefix is missing")
item = str(item)
if r.scard(redis_prefix + index_name + item) > 0:
    data_index_dict[item] = r.smembers(redis_prefix + index_name + item)
    for i in data_index_dict[item]:
        json_data = r.get(redis_prefix + name + str(int(i)))
        data_dict[i] = self._deserialize_data(json_data)
    return (data_dict, data_index_dict)
raise KeyError("No data found in Redis for " + item)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_data_for_date(self, item, timestamp, data_dict, data_index_dict): """ Checks if the item is found in the index. An entry in the index points to the data in the data_dict. This is mainly used to retrieve callsigns and prefixes. In case data is found for item, a dict containing the data is returned. Otherwise a KeyError is raised. """
if item in data_index_dict: for item in data_index_dict[item]: # startdate < timestamp if const.START in data_dict[item] and not const.END in data_dict[item]: if data_dict[item][const.START] < timestamp: item_data = copy.deepcopy(data_dict[item]) del item_data[const.START] return item_data # enddate > timestamp elif not const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.END] > timestamp: item_data = copy.deepcopy(data_dict[item]) del item_data[const.END] return item_data # startdate > timestamp > enddate elif const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.START] < timestamp \ and data_dict[item][const.END] > timestamp: item_data = copy.deepcopy(data_dict[item]) del item_data[const.START] del item_data[const.END] return item_data # no startdate or enddate available elif not const.START in data_dict[item] and not const.END in data_dict[item]: return data_dict[item] raise KeyError
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_inv_operation_for_date(self, item, timestamp, data_dict, data_index_dict): """ Checks if the callsign is marked as an invalid operation for a given timestamp. In case the operation is invalid, True is returned. Otherwise a KeyError is raised. """
if item in data_index_dict: for item in data_index_dict[item]: # startdate < timestamp if const.START in data_dict[item] and not const.END in data_dict[item]: if data_dict[item][const.START] < timestamp: return True # enddate > timestamp elif not const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.END] > timestamp: return True # startdate > timestamp > enddate elif const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.START] < timestamp \ and data_dict[item][const.END] > timestamp: return True # no startdate or enddate available elif not const.START in data_dict[item] and not const.END in data_dict[item]: return True raise KeyError
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lookup_prefix(self, prefix, timestamp=timestamp_now): """ Returns lookup data of a Prefix Args: prefix (string): Prefix of an Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: dict: Dictionary containing the country specific data of the Prefix Raises: KeyError: No matching Prefix found APIKeyMissingError: API Key for Clublog missing or incorrect Example: The following code shows how to obtain the information for the prefix "DH" from the countryfile.com database (default database). { 'adif': 230, 'country': u'Fed. Rep. of Germany', 'longitude': 10.0, 'cqz': 14, 'ituz': 28, 'latitude': 51.0, 'continent': u'EU' } Note: This method is available for - clublogxml - countryfile - redis """
prefix = prefix.strip().upper() if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile": return self._check_data_for_date(prefix, timestamp, self._prefixes, self._prefixes_index) elif self._lookuptype == "redis": data_dict, index = self._get_dicts_from_redis("_prefix_", "_prefix_index_", self._redis_prefix, prefix) return self._check_data_for_date(prefix, timestamp, data_dict, index) # no matching case raise KeyError
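A hedged sketch of a prefix lookup, following the "DH" example from the docstring; the constructor argument is an assumption, and the const module used throughout this code is expected to be imported.

# Hypothetical sketch: resolve a callsign prefix against the countryfile data.
lib = LookupLib(lookuptype='countryfile')        # constructor arg is an assumption
info = lib.lookup_prefix('DH')
print(info[const.COUNTRY], info[const.ADIF])     # Fed. Rep. of Germany 230, per the docstring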
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_invalid_operation(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)): """ Returns True if an operation is known to be invalid Args: callsign (string): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: bool: True if a record exists for this callsign (at the given time) Raises: KeyError: No matching callsign found APIKeyMissingError: API Key for Clublog missing or incorrect Example: The following code checks against the Clublog XML database whether the operation is valid for two dates. True Seems to be an invalid operation before 31.1.2012 Note: This method is available for - clublogxml - redis """
callsign = callsign.strip().upper() if self._lookuptype == "clublogxml": return self._check_inv_operation_for_date(callsign, timestamp, self._invalid_operations, self._invalid_operations_index) elif self._lookuptype == "redis": data_dict, index = self._get_dicts_from_redis("_inv_op_", "_inv_op_index_", self._redis_prefix, callsign) return self._check_inv_operation_for_date(callsign, timestamp, data_dict, index) #no matching case raise KeyError
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_zone_exception_for_date(self, item, timestamp, data_dict, data_index_dict): """ Checks the index and data to determine whether a CQ zone exception exists for the callsign. When a zone exception is found, the zone is returned. If no exception is found, a KeyError is raised. """
if item in data_index_dict: for item in data_index_dict[item]: # startdate < timestamp if const.START in data_dict[item] and not const.END in data_dict[item]: if data_dict[item][const.START] < timestamp: return data_dict[item][const.CQZ] # enddate > timestamp elif not const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.END] > timestamp: return data_dict[item][const.CQZ] # startdate > timestamp > enddate elif const.START in data_dict[item] and const.END in data_dict[item]: if data_dict[item][const.START] < timestamp \ and data_dict[item][const.END] > timestamp: return data_dict[item][const.CQZ] # no startdate or enddate available elif not const.START in data_dict[item] and not const.END in data_dict[item]: return data_dict[item][const.CQZ] raise KeyError
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lookup_zone_exception(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)): """ Returns a CQ Zone if an exception exists for the given callsign Args: callsign (string): Amateur radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: int: Value of the CQ Zone exception which exists for this callsign (at the given time) Raises: KeyError: No matching callsign found APIKeyMissingError: API Key for Clublog missing or incorrect Example: The following code checks whether a CQ Zone exception exists in the Clublog XML database for the callsign DP0GVN. 38 The prefix "DP" is assigned to Germany, but the station is located in Antarctica and is therefore in CQ Zone 38. Note: This method is available for - clublogxml - redis """
callsign = callsign.strip().upper() if self._lookuptype == "clublogxml": return self._check_zone_exception_for_date(callsign, timestamp, self._zone_exceptions, self._zone_exceptions_index) elif self._lookuptype == "redis": data_dict, index = self._get_dicts_from_redis("_zone_ex_", "_zone_ex_index_", self._redis_prefix, callsign) return self._check_zone_exception_for_date(callsign, timestamp, data_dict, index) #no matching case raise KeyError
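A hedged sketch of the zone-exception lookup using the DP0GVN example from the docstring; the constructor arguments are assumptions.

# Hypothetical sketch: a callsign whose CQ Zone differs from that of its prefix entity.
lib = LookupLib(lookuptype='clublogxml', apikey='<your-clublog-api-key>')   # args are assumptions
print(lib.lookup_zone_exception('DP0GVN'))       # 38 (Antarctica), as in the docstring
print(lib.lookup_prefix('DP')[const.CQZ])        # zone of the plain prefix, for comparison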
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _lookup_clublogAPI(self, callsign=None, timestamp=timestamp_now, url="https://secure.clublog.org/dxcc", apikey=None): """ Set up the Lookup object for Clublog Online API """
params = {"year" : timestamp.strftime("%Y"), "month" : timestamp.strftime("%m"), "day" : timestamp.strftime("%d"), "hour" : timestamp.strftime("%H"), "minute" : timestamp.strftime("%M"), "api" : apikey, "full" : "1", "call" : callsign } if sys.version_info.major == 3: encodeurl = url + "?" + urllib.parse.urlencode(params) else: encodeurl = url + "?" + urllib.urlencode(params) response = requests.get(encodeurl, timeout=5) if not self._check_html_response(response): raise LookupError jsonLookup = response.json() lookup = {} for item in jsonLookup: if item == "Name": lookup[const.COUNTRY] = jsonLookup["Name"] elif item == "DXCC": lookup[const.ADIF] = int(jsonLookup["DXCC"]) elif item == "Lon": lookup[const.LONGITUDE] = float(jsonLookup["Lon"])*(-1) elif item == "Lat": lookup[const.LATITUDE] = float(jsonLookup["Lat"]) elif item == "CQZ": lookup[const.CQZ] = int(jsonLookup["CQZ"]) elif item == "Continent": lookup[const.CONTINENT] = jsonLookup["Continent"] if lookup[const.ADIF] == 0: raise KeyError else: return lookup
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _extract_clublog_header(self, cty_xml_filename): """ Extract the header of the Clublog XML File """
cty_header = {}
try:
    with open(cty_xml_filename, "r") as cty:
        raw_header = cty.readline()
    cty_date = re.search("date='.+'", raw_header)
    if cty_date:
        cty_date = cty_date.group(0).replace("date=", "").replace("'", "")
        cty_date = datetime.strptime(cty_date[:19], '%Y-%m-%dT%H:%M:%S')
        cty_date = cty_date.replace(tzinfo=UTC)  # replace() returns a new object; keep it
        cty_header["Date"] = cty_date
    cty_ns = re.search("xmlns='.+[']", raw_header)
    if cty_ns:
        cty_ns = cty_ns.group(0).replace("xmlns=", "").replace("'", "")
        cty_header['NameSpace'] = cty_ns
    if len(cty_header) == 2:
        self._logger.debug("Header successfully retrieved from CTY File")
    elif len(cty_header) < 2:
        self._logger.warning("Header could only be partially retrieved from CTY File")
        self._logger.warning("Content of Header: ")
        for key in cty_header:
            self._logger.warning(str(key) + ": " + str(cty_header[key]))
    return cty_header
except Exception as e:
    self._logger.error("Clublog CTY File could not be opened / modified")
    self._logger.error("Error Message: " + str(e))
    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _remove_clublog_xml_header(self, cty_xml_filename): """ remove the header of the Clublog XML File to make it properly parseable for the python ElementTree XML parser """
import tempfile try: with open(cty_xml_filename, "r") as f: content = f.readlines() cty_dir = tempfile.gettempdir() cty_name = os.path.split(cty_xml_filename)[1] cty_xml_filename_no_header = os.path.join(cty_dir, "NoHeader_"+cty_name) with open(cty_xml_filename_no_header, "w") as f: f.writelines("<clublog>\n\r") f.writelines(content[1:]) self._logger.debug("Header successfully modified for XML Parsing") return cty_xml_filename_no_header except Exception as e: self._logger.error("Clublog CTY could not be opened / modified") self._logger.error("Error Message: " + str(e)) return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_random_word(self, length): """ Generates a random word """
return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _serialize_data(self, my_dict): """ Serialize a Dictionary into JSON """
new_dict = {} for item in my_dict: if isinstance(my_dict[item], datetime): new_dict[item] = my_dict[item].strftime('%Y-%m-%d%H:%M:%S') else: new_dict[item] = str(my_dict[item]) return json.dumps(new_dict)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _deserialize_data(self, json_data): """ Deserialize a JSON into a dictionary """
# json.loads() no longer accepts an 'encoding' argument on Python 3
my_dict = json.loads(json_data.decode('utf8').replace("'", '"'))
for item in my_dict:
    if item == const.ADIF:
        my_dict[item] = int(my_dict[item])
    elif item == const.DELETED:
        my_dict[item] = self._str_to_bool(my_dict[item])
    elif item == const.CQZ:
        my_dict[item] = int(my_dict[item])
    elif item == const.ITUZ:
        my_dict[item] = int(my_dict[item])
    elif item == const.LATITUDE:
        my_dict[item] = float(my_dict[item])
    elif item == const.LONGITUDE:
        my_dict[item] = float(my_dict[item])
    elif item == const.START:
        my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
    elif item == const.END:
        my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
    elif item == const.WHITELIST_START:
        my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
    elif item == const.WHITELIST_END:
        my_dict[item] = datetime.strptime(my_dict[item], '%Y-%m-%d%H:%M:%S').replace(tzinfo=UTC)
    elif item == const.WHITELIST:
        my_dict[item] = self._str_to_bool(my_dict[item])
    else:
        my_dict[item] = str(my_dict[item])  # 'unicode' only exists on Python 2; use str
return my_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_methods(*objs): """ Return the names of all public callable attributes of the given objects"""
return set( attr for obj in objs for attr in dir(obj) if not attr.startswith('_') and callable(getattr(obj, attr)) )
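A small self-contained example of get_methods(); the two sample classes are made up purely for illustration.

# Collect the public callable attribute names of two objects.
class Reader:
    def read(self):
        pass
    def _internal(self):
        pass

class Writer:
    def write(self):
        pass

print(get_methods(Reader(), Writer()))   # {'read', 'write'} -- names starting with '_' are skipped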
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_file(cls, filename, *, strict=True): """ Create a new Config object from a configuration file. Args: filename (str): The location and name of the configuration file. strict (bool): If true raises a ConfigLoadError when the configuration cannot be found. Returns: An instance of the Config class. Raises: ConfigLoadError: If the configuration cannot be found. """
config = cls() config.load_from_file(filename, strict=strict) return config
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_from_file(self, filename=None, *, strict=True): """ Load the configuration from a file. The location of the configuration file can either be specified directly in the parameter filename or is searched for in the following order: 1. In the environment variable given by LIGHTFLOW_CONFIG_ENV 2. In the current execution directory 3. In the user's home directory Args: filename (str): The location and name of the configuration file. strict (bool): If true raises a ConfigLoadError when the configuration cannot be found. Raises: ConfigLoadError: If the configuration cannot be found. """
self.set_to_default() if filename: self._update_from_file(filename) else: if LIGHTFLOW_CONFIG_ENV not in os.environ: if os.path.isfile(os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME)): self._update_from_file( os.path.join(os.getcwd(), LIGHTFLOW_CONFIG_NAME)) elif os.path.isfile(expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME))): self._update_from_file( expand_env_var('~/{}'.format(LIGHTFLOW_CONFIG_NAME))) else: if strict: raise ConfigLoadError('Could not find the configuration file.') else: self._update_from_file(expand_env_var(os.environ[LIGHTFLOW_CONFIG_ENV])) self._update_python_paths()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_from_dict(self, conf_dict=None): """ Load the configuration from a dictionary. Args: conf_dict (dict): Dictionary with the configuration. """
self.set_to_default() self._update_dict(self._config, conf_dict) self._update_python_paths()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _update_from_file(self, filename): """ Helper method to update an existing configuration with the values from a file. Loads a configuration file and replaces all values in the existing configuration dictionary with the values from the file. Args: filename (str): The path and name to the configuration file. """
if os.path.exists(filename): try: with open(filename, 'r') as config_file: yaml_dict = yaml.safe_load(config_file.read()) if yaml_dict is not None: self._update_dict(self._config, yaml_dict) except IsADirectoryError: raise ConfigLoadError( 'The specified configuration file is a directory not a file') else: raise ConfigLoadError('The config file {} does not exist'.format(filename))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _update_dict(self, to_dict, from_dict): """ Recursively merges the fields for two dictionaries. Args: to_dict (dict): The dictionary onto which the merge is executed. from_dict (dict): The dictionary merged into to_dict """
for key, value in from_dict.items(): if key in to_dict and isinstance(to_dict[key], dict) and \ isinstance(from_dict[key], dict): self._update_dict(to_dict[key], from_dict[key]) else: to_dict[key] = from_dict[key]
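A self-contained sketch of the recursive merge performed by _update_dict(); the dictionary contents are made up for illustration, while the bare Config() constructor mirrors its use in from_file() above.

# Nested keys are merged recursively, scalar values are overwritten.
config = Config()
defaults = {'store': {'host': 'localhost', 'port': 27017}, 'queue': 'workflow'}
overrides = {'store': {'host': 'db.example.com'}}

config._update_dict(defaults, overrides)
# defaults is now {'store': {'host': 'db.example.com', 'port': 27017}, 'queue': 'workflow'}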
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _update_python_paths(self): """ Append the workflow and libraries paths to the PYTHONPATH. """
for path in self._config['workflows'] + self._config['libraries']: if os.path.isdir(os.path.abspath(path)): if path not in sys.path: sys.path.append(path) else: raise ConfigLoadError( 'Workflow directory {} does not exist'.format(path))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode_char_spot(raw_string): """Chop Line from DX-Cluster into pieces and return a dict with the spot data"""
data = {}

# Spotter callsign
if re.match(r'[A-Za-z0-9\/]+[:$]', raw_string[6:15]):
    data[const.SPOTTER] = re.sub(':', '', re.match(r'[A-Za-z0-9\/]+[:$]', raw_string[6:15]).group(0))
else:
    raise ValueError

if re.search(r'[0-9\.]{5,12}', raw_string[10:25]):
    data[const.FREQUENCY] = float(re.search(r'[0-9\.]{5,12}', raw_string[10:25]).group(0))
else:
    raise ValueError

data[const.DX] = re.sub(r'[^A-Za-z0-9\/]+', '', raw_string[26:38])
data[const.COMMENT] = re.sub(r'[^\sA-Za-z0-9\.,;\#\+\-!\?\$\(\)@\/]+', ' ', raw_string[39:69]).strip()
data[const.TIME] = datetime.now().replace(tzinfo=UTC)
return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode_pc11_message(raw_string): """Decode PC11 message, which usually contains DX Spots"""
data = {} spot = raw_string.split("^") data[const.FREQUENCY] = float(spot[1]) data[const.DX] = spot[2] data[const.TIME] = datetime.fromtimestamp(mktime(strptime(spot[3]+" "+spot[4][:-1], "%d-%b-%Y %H%M"))) data[const.COMMENT] = spot[5] data[const.SPOTTER] = spot[6] data["node"] = spot[7] data["raw_spot"] = raw_string return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode_pc23_message(raw_string): """ Decode PC23 Message which usually contains WCY """
data = {} wcy = raw_string.split("^") data[const.R] = int(wcy[1]) data[const.expk] = int(wcy[2]) data[const.CALLSIGN] = wcy[3] data[const.A] = wcy[4] data[const.SFI] = wcy[5] data[const.K] = wcy[6] data[const.AURORA] = wcy[7] data["node"] = wcy[7] data["ip"] = wcy[8] data["raw_data"] = raw_string return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run(self, data, store, signal, context, *, success_callback=None, stop_callback=None, abort_callback=None): """ The internal run method that decorates the public run method. This method makes sure data is being passed to and from the task. Args: data (MultiTaskData): The data object that has been passed from the predecessor task. store (DataStoreDocument): The persistent data store object that allows the task to store data for access across the current workflow run. signal (TaskSignal): The signal object for tasks. It wraps the construction and sending of signals into easy to use methods. context (TaskContext): The context in which the tasks runs. success_callback: This function is called when the task completed successfully stop_callback: This function is called when a StopTask exception was raised. abort_callback: This function is called when an AbortWorkflow exception was raised. Raises: TaskReturnActionInvalid: If the return value of the task is not an Action object. Returns: Action: An Action object containing the data that should be passed on to the next task and optionally a list of successor tasks that should be executed. """
if data is None: data = MultiTaskData() data.add_dataset(self._name) try: if self._callback_init is not None: self._callback_init(data, store, signal, context) result = self.run(data, store, signal, context) if self._callback_finally is not None: self._callback_finally(TaskStatus.Success, data, store, signal, context) if success_callback is not None: success_callback() # the task should be stopped and optionally all successor tasks skipped except StopTask as err: if self._callback_finally is not None: self._callback_finally(TaskStatus.Stopped, data, store, signal, context) if stop_callback is not None: stop_callback(exc=err) result = Action(data, limit=[]) if err.skip_successors else None # the workflow should be stopped immediately except AbortWorkflow as err: if self._callback_finally is not None: self._callback_finally(TaskStatus.Aborted, data, store, signal, context) if abort_callback is not None: abort_callback(exc=err) result = None signal.stop_workflow() # catch any other exception, call the finally callback, then re-raise except: if self._callback_finally is not None: self._callback_finally(TaskStatus.Error, data, store, signal, context) signal.stop_workflow() raise # handle the returned data (either implicitly or as an returned Action object) by # flattening all, possibly modified, input datasets in the MultiTask data down to # a single output dataset. if result is None: data.flatten(in_place=True) data.add_task_history(self.name) return Action(data) else: if not isinstance(result, Action): raise TaskReturnActionInvalid() result.data.flatten(in_place=True) result.data.add_task_history(self.name) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def latlong_to_locator (latitude, longitude): """converts WGS84 coordinates into the corresponding Maidenhead Locator Args: latitude (float): Latitude longitude (float): Longitude Returns: string: Maidenhead locator Raises: ValueError: When called with wrong or invalid input args TypeError: When args are non float values Example: The following example converts latitude and longitude into the Maidenhead locator 'JN48QM' Note: Latitude (negative = South, positive = North) Longitude (negative = West, positive = East) """
if longitude >= 180 or longitude <= -180: raise ValueError if latitude >= 90 or latitude <= -90: raise ValueError longitude += 180; latitude +=90; locator = chr(ord('A') + int(longitude / 20)) locator += chr(ord('A') + int(latitude / 10)) locator += chr(ord('0') + int((longitude % 20) / 2)) locator += chr(ord('0') + int(latitude % 10)) locator += chr(ord('A') + int((longitude - int(longitude / 2) * 2) / (2 / 24))) locator += chr(ord('A') + int((latitude - int(latitude / 1) * 1 ) / (1 / 24))) return locator
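A short usage sketch matching the 'JN48QM' example mentioned in the docstring.

print(latlong_to_locator(48.5208, 9.375))    # JN48QM
# latlong_to_locator(91.0, 0.0)              # would raise ValueError (latitude out of range)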
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def locator_to_latlong (locator): """converts a Maidenhead locator into the corresponding WGS84 coordinates Args: locator (string): Locator, either 4 or 6 characters Returns: tuple (float, float): Latitude, Longitude Raises: ValueError: When called with wrong or invalid input arg TypeError: When arg is not a string Example: The following example converts a Maidenhead locator into Latitude and Longitude 48.5208333333 9.375 Note: Latitude (negative = South, positive = North) Longitude (negative = West, positive = East) """
locator = locator.upper() if len(locator) == 5 or len(locator) < 4: raise ValueError if ord(locator[0]) > ord('R') or ord(locator[0]) < ord('A'): raise ValueError if ord(locator[1]) > ord('R') or ord(locator[1]) < ord('A'): raise ValueError if ord(locator[2]) > ord('9') or ord(locator[2]) < ord('0'): raise ValueError if ord(locator[3]) > ord('9') or ord(locator[3]) < ord('0'): raise ValueError if len(locator) == 6: if ord(locator[4]) > ord('X') or ord(locator[4]) < ord('A'): raise ValueError if ord (locator[5]) > ord('X') or ord(locator[5]) < ord('A'): raise ValueError longitude = (ord(locator[0]) - ord('A')) * 20 - 180 latitude = (ord(locator[1]) - ord('A')) * 10 - 90 longitude += (ord(locator[2]) - ord('0')) * 2 latitude += (ord(locator[3]) - ord('0')) if len(locator) == 6: longitude += ((ord(locator[4])) - ord('A')) * (2 / 24) latitude += ((ord(locator[5])) - ord('A')) * (1 / 24) # move to center of subsquare longitude += 1 / 24 latitude += 0.5 / 24 else: # move to center of square longitude += 1; latitude += 0.5; return latitude, longitude
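The reverse conversion, matching the coordinates given in the docstring; the returned point is the centre of the (sub)square.

lat, lon = locator_to_latlong('JN48QM')
print(lat, lon)                              # approximately 48.5208333333 9.375
# locator_to_latlong('JN4')                  # would raise ValueError (invalid length)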
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_heading(locator1, locator2): """calculates the heading from the first to the second locator Args: locator1 (string): Locator, either 4 or 6 characters locator2 (string): Locator, either 4 or 6 characters Returns: float: Heading in deg Raises: ValueError: When called with wrong or invalid input arg AttributeError: When args are not a string Example: The following calculates the heading from locator1 to locator2 74.3136 """
lat1, long1 = locator_to_latlong(locator1)
lat2, long2 = locator_to_latlong(locator2)
r_lat1 = radians(lat1)
r_lat2 = radians(lat2)
d_lon = radians(long2 - long1)
# bearing calculation
b = atan2(sin(d_lon) * cos(r_lat2), cos(r_lat1) * sin(r_lat2) - sin(r_lat1) * cos(r_lat2) * cos(d_lon))
bd = degrees(b)
bn = (bd + 360) % 360  # normalise the bearing to the range 0-360 degrees
return bn
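A hedged usage sketch; the second locator is chosen only for illustration, so no exact bearing value is asserted here.

heading = calculate_heading('JN48QM', 'JO31AA')   # bearing in degrees towards the second locator
print(round(heading, 1))                          # 0 <= value < 360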