Deletes the pipeline specified by the key.
Returns: (status code for the DELETE request, success message dict); expect (200, {'success': 'true'}) on successful execution.

def delete_pipeline(self, pipeline_key):
    if pipeline_key:
        uri = '/'.join([self.api_uri, self.pipelines_suffix, pipeline_key])
        return self._req('delete', uri)
    return requests.codes.bad_request, None
Deletes all pipelines.
Returns: OK for overall success or the last error code, plus the response data.

def delete_all_pipelines(self):
    code, data = self.get_pipeline()
    if code == requests.codes.ok:
        for pl_data in data:
            c, d = self.delete_pipeline(pl_data['pipelineKey'])
            if c != requests.codes.ok:
                code = c
                data = d
    return code, data
Creates a pipeline with the provided attributes.
Args: name -- required name string; description -- required description string; kwargs -- {name, description, orgWide, aclEntries}, user-specifiable ones only.
Returns: (status code, pipeline_dict) as created.

def create_pipeline(self, name, description, **kwargs):
    # request sanity check
    if not (name and description):
        return requests.codes.bad_request, None
    kwargs.update({'name': name, 'description': description})
    new_pl = StreakPipeline(**kwargs)
    uri = '/'.join([self.api_uri, self.pipelines_suffix])
    code, r_data = self._req('put', uri, new_pl.to_dict())
    return code, r_data
Updates a pipeline with the provided attributes.
Args: pipeline -- StreakPipeline object (must carry its 'pipelineKey' attribute).
Returns: (status code, pipeline_dict).

def update_pipeline(self, pipeline):
    # request sanity check
    if type(pipeline) is not StreakPipeline:
        return requests.codes.bad_request, None
    payload = pipeline.to_dict(rw=True)
    try:
        uri = '/'.join([self.api_uri, self.pipelines_suffix,
                        pipeline.attributes['pipelineKey']])
    except KeyError:
        return requests.codes.bad_request, None
    code, r_data = self._req('post', uri, json.dumps(payload))
    return code, r_data
Gets one/all box objects with a single GET. Individual boxes must be polled separately for their contents; this is a directory of what can be asked for.
Args: box_key -- key for the target box (default: None, i.e. ALL); sort_by -- descending order by 'creationTimestamp' or 'lastUpdatedTimestamp'.
Returns: (status code for the GET request, box dict or list thereof).

def get_box(self, box_key=None, sort_by=None):
    uri = '/'.join([self.api_uri, self.boxes_suffix])
    if box_key:
        uri = '/'.join([uri, box_key])
    if sort_by:
        if sort_by in ['creationTimestamp', 'lastUpdatedTimestamp']:
            uri += self.sort_by_postfix + sort_by
        else:
            return requests.codes.bad_request, {
                'success': 'False',
                'error': "sortBy needs to be 'creationTimestamp' or "
                         "'lastUpdatedTimestamp'"}
    return self._req('get', uri)
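For illustration only, here is the shape of URI this builds. The concrete values of `api_uri`, `boxes_suffix` and `sort_by_postfix` do not appear in this excerpt, so the ones below are assumptions, not the library's actual constants:

    # hypothetical values, not taken from this excerpt
    api_uri = 'https://www.streak.com/api/v1'
    boxes_suffix = 'boxes'
    sort_by_postfix = '?sortBy='
    uri = '/'.join([api_uri, boxes_suffix]) + sort_by_postfix + 'creationTimestamp'
    # -> 'https://www.streak.com/api/v1/boxes?sortBy=creationTimestamp'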
Gets all box objects in a pipeline with a single GET.
Args: pipeline_key -- key for the pipeline; sort_by -- descending order by 'creationTimestamp' or 'lastUpdatedTimestamp' (may not be supported).
Returns: (status code for the GET request, dict of boxes).

def get_pipeline_boxes(self, pipeline_key, sort_by=None):
    if not pipeline_key:
        return requests.codes.bad_request, None
    uri = '/'.join([self.api_uri, self.pipelines_suffix, pipeline_key])
    if sort_by:
        if sort_by in ['creationTimestamp', 'lastUpdatedTimestamp']:
            uri += self.sort_by_postfix + sort_by
        else:
            return requests.codes.bad_request, {
                'success': 'False',
                'error': "sortBy needs to be 'creationTimestamp' or "
                         "'lastUpdatedTimestamp'"}
    return self._req('get', uri)
Deletes the box specified by the key.
Returns: (status code for the DELETE request, success message dict).

def delete_box(self, key):
    if key:
        uri = self.box_root_uri + '/' + key
        return self._req('delete', uri)
    return requests.codes.bad_request, None
Creates a box in the specified pipeline with the provided attributes.
Args: pipeline_key -- key of the target pipeline; name -- required name string; kwargs -- see the StreakBox object for details.
Returns: (status code, box dict).

def create_pipeline_box(self, pipeline_key, name, **kwargs):
    # request sanity check
    if not (pipeline_key and name):
        return requests.codes.bad_request, None
    uri = '/'.join([self.api_uri, self.pipelines_suffix,
                    pipeline_key, self.boxes_suffix])
    kwargs.update({'name': name})
    new_box = StreakBox(**kwargs)
    code, data = self._req('put', uri, new_box.to_dict(rw=True))
    return code, data
Updates a box with the provided attributes.
Args: box -- StreakBox object with updated info (must carry its 'boxKey').
Returns: (status code, box in dict form).

def update_box(self, box):
    # request sanity check
    if type(box) is not StreakBox:
        return requests.codes.bad_request, None
    payload = box.to_dict(rw=True)
    try:
        uri = self.box_root_uri + '/' + box.attributes['boxKey']
    except KeyError:
        return requests.codes.bad_request, None
    code, data = self._req('post', uri, json.dumps(payload))
    return code, data
Takes a keyword and returns the search results (appears to work for boxes only).
Args: kw -- keyword (str) to search for.
Returns: (code, list of dicts).

def search(self, kw):
    if not kw:
        return requests.codes.bad_request, None
    code, data = self._req('get', self.search_uri + kw)
    return code, data
Gets all snippets, or one specific snippet by its key.
Args: snippet_key -- snippet key (default: None, i.e. ALL).
Returns: (status code, snippet dict or list thereof).

def get_snippet(self, snippet_key=None):
    uri = '/'.join([self.api_uri, self.snippets_suffix])
    if snippet_key:
        uri = '/'.join([uri, snippet_key])
    code, data = self._req('get', uri)
    return code, data
Gets one/all stage objects in a pipeline with a single GET.
Args: pipeline_key -- key for the pipeline; stage_key -- key for a stage (default: None, i.e. ALL); sort_by -- descending order by 'creationTimestamp' or 'lastUpdatedTimestamp' (may not be supported).
Returns: (status code for the GET request, dict of stages). The single-stage response is not a list, hence the .values() before returning.

def get_pipeline_stage(self, pipeline_key, stage_key=None, sort_by=None):
    if not pipeline_key:
        return requests.codes.bad_request, None
    uri = '/'.join([self.api_uri, self.pipelines_suffix,
                    pipeline_key, self.stages_suffix])
    if stage_key:
        uri = '/'.join([uri, stage_key])
    if sort_by:
        if sort_by in ['creationTimestamp', 'lastUpdatedTimestamp']:
            uri += self.sort_by_postfix + sort_by
        else:
            return requests.codes.bad_request, {
                'success': 'False',
                'error': "sortBy needs to be 'creationTimestamp' or "
                         "'lastUpdatedTimestamp'"}
    code, data = self._req('get', uri)
    # the response format is ambiguous, so we need to rely on user input
    if stage_key:
        data = list(data.values())
    return code, data
Creates a pipeline stage with the provided attributes.
Args: pipeline_key -- key of the target pipeline; name -- required name string; kwargs -- see the StreakStage object for details.
Returns: (status code, stage dict).

def create_pipeline_stage(self, pipeline_key, name, **kwargs):
    # request sanity check
    if not (pipeline_key and name):
        return requests.codes.bad_request, None
    uri = '/'.join([self.api_uri, self.pipelines_suffix,
                    pipeline_key, self.stages_suffix])
    kwargs.update({'name': name})
    new_stage = StreakStage(**kwargs)
    code, data = self._req('put', uri, new_stage.to_dict(rw=True))
    return code, data
Deletes a stage in a pipeline by stage key and pipeline key.
Args: pipeline_key -- key for the pipeline; stage_key -- key for the stage.
Returns: (status code for the DELETE request, dict of the operation report).

def delete_pipeline_stage(self, pipeline_key, stage_key):
    if not (pipeline_key and stage_key):
        return requests.codes.bad_request, None
    uri = '/'.join([self.api_uri, self.pipelines_suffix,
                    pipeline_key, self.stages_suffix, stage_key])
    code, data = self._req('delete', uri)
    return code, data
Updates a pipeline stage with the provided attributes.
Args: stage -- StreakStage object (must carry its 'pipelineKey' and 'key' attributes).
Returns: (status code, stage dict).

def update_pipeline_stage(self, stage):
    # request sanity check
    if type(stage) is not StreakStage:
        return requests.codes.bad_request, None
    payload = stage.to_dict(rw=True)
    try:
        uri = '/'.join([self.api_uri, self.pipelines_suffix,
                        stage.attributes['pipelineKey'],
                        self.stages_suffix, stage.attributes['key']])
    except KeyError:
        return requests.codes.bad_request, None
    code, data = self._req('post', uri, json.dumps(payload))
    return code, data
Creates a field with the provided attributes.
Args: uri -- base uri for the field (pipeline or box uri); name -- required name string; field_type -- required type string [TEXT_INPUT, DATE or PERSON]; kwargs -- {}.
Returns: (status code, field dict).

def _create_field(self, uri, name, field_type, **kwargs):
    # request sanity check
    if not (name and field_type in ['TEXT_INPUT', 'DATE', 'PERSON']):
        return requests.codes.bad_request, {
            'success': 'False',
            'error': "name needs to be provided and field_type needs to be "
                     "'TEXT_INPUT', 'DATE' or 'PERSON'"}
    kwargs.update({'name': name, 'type': field_type})
    new_field = StreakField(**kwargs)
    code, data = self._req('put', uri, new_field.to_dict(rw=True))
    return code, data
Updates a field with the provided attributes.
Args: uri -- base uri of the pipeline or box; field -- StreakField object (must carry its 'key'); kwargs {name, type} -- see StreakField for details.
Returns: (status code, field dict).

def _update_field(self, uri, field):
    # request sanity check
    if type(field) is not StreakField:
        return requests.codes.bad_request, None
    payload = field.to_dict(rw=True)
    try:
        uri = '/'.join([uri, field.attributes['key']])
    except KeyError:
        return requests.codes.bad_request, None
    code, data = self._req('post', uri, json.dumps(payload))
    return code, data
Gets one/all fields in a pipeline.
Args: pipeline_key -- key for the pipeline; field_key -- key for a field (default: None, i.e. ALL).
Returns: (status code, field dict or list thereof).

def get_pipeline_field(self, pipeline_key, field_key=None):
    uri = '/'.join([self.api_uri, self.pipelines_suffix,
                    pipeline_key, self.fields_suffix])
    if field_key:
        uri = '/'.join([uri, field_key])
    return self._req('get', uri)
Creates a pipeline field with the provided attributes.
Args: pipeline_key -- pipeline to add the field to; name -- required name string; field_type -- required type string [TEXT_INPUT, DATE or PERSON]; kwargs -- {}.
Returns: (status code, field dict).

def create_pipeline_field(self, pipeline_key, name, field_type, **kwargs):
    uri = '/'.join([self.api_uri, self.pipelines_suffix,
                    pipeline_key, self.fields_suffix])
    code, data = self._create_field(uri, name, field_type, **kwargs)
    return code, data
Updates the specified pipeline field.
Args: pipeline_key -- key of the pipeline where the field lives; field -- StreakField object with fresh data.
Returns: (status code, updated field dict).

def update_pipeline_field(self, pipeline_key, field):
    uri = '/'.join([self.api_uri, self.pipelines_suffix,
                    pipeline_key, self.fields_suffix])
    return self._update_field(uri, field)
Gets one/all fields in a box.
Args: box_key -- key for the box; field_key -- key for a field (default: None, i.e. ALL).
Returns: (status code, field dict or list thereof).

def get_box_field(self, box_key, field_key=None):
    # does not work: self._raise_unimplemented_error()
    uri = '/'.join([self.api_uri, self.boxes_suffix,
                    box_key, self.fields_suffix])
    if field_key:
        uri = '/'.join([uri, field_key])
    return self._req('get', uri)
Creates a box field with the provided attributes.
Args: box_key -- box to add the field to; name -- required name string; field_type -- required type string [TEXT_INPUT, DATE or PERSON]; kwargs -- {}.
Returns: (status code, field dict).

def create_box_field(self, box_key, name, field_type, **kwargs):
    # does not work: self._raise_unimplemented_error()
    uri = '/'.join([self.api_uri, self.boxes_suffix,
                    box_key, self.fields_suffix])
    code, data = self._create_field(uri, name, field_type, **kwargs)
    return code, data
Updates the specified box field.
Args: box_key -- key of the box where the field lives; field -- StreakField object with fresh data.
Returns: (status code, updated field dict).

def update_box_field(self, box_key, field):
    # does not work: self._raise_unimplemented_error()
    uri = '/'.join([self.api_uri, self.boxes_suffix,
                    box_key, self.fields_suffix])
    return self._update_field(uri, field)
General-purpose function to get newsfeeds.
Args: uri -- base uri for the feed; detail_level -- request argument (str), one of ['ALL', 'CONDENSED'].
Returns: list of feed dicts; parse at your convenience.

def _get_newsfeeds(self, uri, detail_level=None):
    if detail_level:
        if detail_level not in ['ALL', 'CONDENSED']:
            return requests.codes.bad_request, {
                'success': 'False',
                'error': "detailLevel needs to be 'ALL' or 'CONDENSED'"}
        uri += self.detail_level_suffix + detail_level
    return self._req('get', uri)
Gets the newsfeed for a pipeline.
Args: pipeline_key -- pipeline key; detail_level -- request argument (str), one of ['ALL', 'CONDENSED'].
Returns: list of feed dicts; parse at your convenience.

def get_pipeline_newsfeeds(self, pipeline_key, detail_level=None):
    uri = '/'.join([self.api_uri, self.pipelines_suffix,
                    pipeline_key, self.newsfeed_suffix])
    return self._get_newsfeeds(uri, detail_level)
Gets the newsfeed for a box.
Args: box_key -- box key; detail_level -- request argument (str), one of ['ALL', 'CONDENSED'].
Returns: list of feed dicts; parse at your convenience.

def get_box_newsfeeds(self, box_key, detail_level=None):
    uri = '/'.join([self.api_uri, self.boxes_suffix,
                    box_key, self.newsfeed_suffix])
    return self._get_newsfeeds(uri, detail_level)
Gets the thread specified by thread_key.
Args: thread_key -- thread to get.
Returns: a thread dict.

def get_thread(self, thread_key):
    uri = '/'.join([self.api_uri, self.threads_suffix, thread_key])
    return self._req('get', uri)
Gets all threads in the specified box.
Args: box_key -- box to look in.
Returns: a list of thread dicts.

def get_box_threads(self, box_key):
    uri = '/'.join([self.api_uri, self.boxes_suffix,
                    box_key, self.threads_suffix])
    return self._req('get', uri)
Creates a comment in a box with the provided attributes.
Args: box_key -- key for the box; message -- message string; kwargs -- see the StreakComment object for more information.
Returns: (status code, comment dict).

def create_box_comments(self, box_key, message, **kwargs):
    # request sanity check
    if not (box_key and message):
        return requests.codes.bad_request, None
    uri = '/'.join([self.api_uri, self.boxes_suffix,
                    box_key, self.comments_suffix])
    kwargs.update({'message': message})
    new_cmt = StreakComment(**kwargs)
    code, r_data = self._req('put', uri, new_cmt.to_dict())
    return code, r_data
Gets the comments in a box.
Args: box_key -- key for the box.
Returns: (status code, list of comment dicts).

def get_box_comments(self, box_key):
    uri = '/'.join([self.api_uri, self.boxes_suffix,
                    box_key, self.comments_suffix])
    return self._req('get', uri)
Deletes the comment in a box identified by comment_key.
Args: box_key -- key for the box; comment_key -- key for the comment.
Returns: (status code for the DELETE request, response data).

def delete_box_comment(self, box_key, comment_key):
    # does not work: self._raise_unimplemented_error()
    uri = '/'.join([self.api_uri, self.boxes_suffix,
                    box_key, self.comments_suffix, comment_key])
    return self._req('delete', uri)
Creates a reminder with the provided attributes.
Args: box_key -- box to add the reminder to; message -- message for the reminder; remind_date -- date to remind on, in ticks; remind_followers -- true/false; kwargs -- see the StreakReminder object for details.
Returns: (status code, reminder dict).

def create_box_reminder(self, box_key, message, remind_date,
                        remind_followers, **kwargs):
    uri = '/'.join([self.api_uri, self.boxes_suffix,
                    box_key, self.reminders_suffix])
    kwargs.update({'message': message,
                   'remindDate': remind_date,
                   'remindFollowers': remind_followers})
    new_rem = StreakReminder(**kwargs)
    code, data = self._req('put', uri, new_rem.to_dict(rw=True))
    return code, data
Updates a reminder with the provided attributes.
Args: reminder -- updated reminder of StreakReminder type (must carry its 'key').
Returns: (status code, reminder dict).

def update_reminder(self, reminder):
    # request sanity check
    if type(reminder) is not StreakReminder:
        return requests.codes.bad_request, None
    payload = reminder.to_dict(rw=True)
    try:
        uri = '/'.join([self.api_uri, self.reminders_suffix,
                        reminder.attributes['key']])
    except KeyError:
        return requests.codes.bad_request, None
    code, data = self._req('post', uri, json.dumps(payload))
    return code, data
Gets all reminders for a box.
Args: box_key -- key for the box.
Returns: (status code, list of reminder dicts).

def get_box_reminders(self, box_key):
    # required sanity check
    if not box_key:
        return requests.codes.bad_request, None
    uri = '/'.join([self.api_uri, self.boxes_suffix,
                    box_key, self.reminders_suffix])
    return self._req('get', uri)
Gets one reminder.
Args: reminder_key -- key for the reminder to get.
Returns: (status code, reminder dict).

def get_reminder(self, reminder_key):
    # required sanity check
    if not reminder_key:
        return requests.codes.bad_request, None
    uri = '/'.join([self.api_uri, self.reminders_suffix, reminder_key])
    return self._req('get', uri)
Gets file information.
Args: file_key -- key for the file to get.
Returns: (status code, dict of file info).

def get_file(self, file_key):
    uri = '/'.join([self.api_uri, self.files_suffix, file_key])
    return self._req('get', uri)
Gets file contents.
Args: file_key -- key for the file.
Returns: (status code, ?).

def get_file_contents(self, file_key):
    # does not work: self._raise_unimplemented_error()
    uri = '/'.join([self.api_uri, self.files_suffix,
                    file_key, self.file_contents_suffix])
    return self._req('get', uri)
Gets a link to the file.
Args: file_key -- key for the file.
Returns: (status code, ?).

def get_file_link(self, file_key):
    # does not work: self._raise_unimplemented_error()
    uri = '/'.join([self.api_uri, self.files_suffix,
                    file_key, self.file_link_suffix])
    return self._req('get', uri)
Gets the file infos in a single box.
Args: box_key -- key for the box.
Returns: (status code, list of file info dicts).

def get_box_files(self, box_key):
    uri = '/'.join([self.api_uri, self.boxes_suffix,
                    box_key, self.files_suffix])
    return self._req('get', uri)
Dips the brush in paint.
Arguments: index -- an integer between 0 and 7, inclusive; tells the bot which color you want.

def get_color(index):
    if index in range(0, 8):
        # Send the turtle to the top-left corner of the window to imitate
        # the position of the WCB's brush.
        state['turtle'].goto(-WCB_WIDTH / 2, -WCB_HEIGHT / 2)
        _make_cnc_request("tool.color./" + str(index))
        # This is the order of the colors in the palette in our classroom's
        # bot; yours may vary!
        colors = ["black", "red", "orange", "yellow",
                  "green", "blue", "purple", "brown"]
        state['turtle'].color(colors[index])
        state['distance_traveled'] = 0
    else:
        print("Color indexes must be between 0 and 7, but you gave me: "
              + str(index))
Moves the brush to a particular position.
Arguments: x -- a number between -250 and 250; y -- a number between -180 and 180.

def move_to(x, y):
    _make_cnc_request("coord/{0}/{1}".format(x, y))
    state['turtle'].goto(x, y)
Turns the brush's "turtle" to the left. Arguments: relative_angle - a number like 10. A bigger number makes the turtle turn farther to the left.
def turn_left(relative_angle): assert int(relative_angle) == relative_angle, "turn_left() only accepts integers, but you gave it " + str(relative_angle) _make_cnc_request("move.left./" + str(relative_angle)) state['turtle'].left(relative_angle)
1,109,513
Turns the brush's "turtle" to the right. Arguments: relative_angle - a number like 10. A bigger number makes the turtle turn farther to the right.
def turn_right(relative_angle): assert int(relative_angle) == relative_angle, "turn_right() only accepts integers, but you gave it " + str(relative_angle) _make_cnc_request("move.right./" + str(relative_angle)) state['turtle'].right(relative_angle)
1,109,514
Applies an instance method with name `fqdn` to `o`.
Args: fqdn (str) -- fully-qualified, dotted name of the instance method; o -- object to apply the instance method to.

def _instance_transform(fqdn, o, *args, **kwargs):
    return _package_transform(o, fqdn, start=0, *args, **kwargs)
Enables/disables writing logs to files.
Arguments: set_file (:obj:`bool`) -- False disables, True enables.

def set_log_rotate_handler(self, set_file):
    if hasattr(self, 'debug_handler'):
        if set_file:
            self.log.addHandler(self.debug_handler)
            self.log.addHandler(self.error_handler)
        else:
            try:
                self.log.removeHandler(self.error_handler)
                self.log.removeHandler(self.debug_handler)
            except Exception:
                pass
    else:
        self.log.debug('The file log handlers were not created. It is not '
                       'possible to write to the log files.')
Configures the class log level.
Arguments: log_level (:obj:`str`) -- log level ('NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL').

def set_log_level(self, log_level):
    if log_level == 'DEBUG':
        self.log.setLevel(logging.DEBUG)
        self.log.debug("Changing log level to " + log_level)
    elif log_level == 'INFO':
        self.log.setLevel(logging.INFO)
        self.log.info("Changing log level to " + log_level)
    elif log_level == 'WARNING':
        self.log.setLevel(logging.WARNING)
        self.log.warning("Changing log level to " + log_level)
    elif log_level == 'ERROR':
        self.log.setLevel(logging.ERROR)
        self.log.error("Changing log level to " + log_level)
    elif log_level == 'CRITICAL':
        self.log.setLevel(logging.CRITICAL)
        self.log.critical("Changing log level to " + log_level)
    elif log_level == 'NOTSET':
        self.log.setLevel(logging.NOTSET)
    else:
        raise NotImplementedError('Not implemented log level ' + str(log_level))
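The if/elif ladder above can be collapsed using the standard library's own name-to-level mapping. A minimal sketch (not the author's code; `set_log_level_compact` is an illustrative name):

    import logging

    def set_log_level_compact(log, log_level):
        # logging.getLevelName maps 'DEBUG' -> 10 and so on; unknown names
        # come back as the string 'Level <name>', which we treat as an error.
        level = logging.getLevelName(log_level)
        if not isinstance(level, int):
            raise NotImplementedError('Not implemented log level ' + str(log_level))
        log.setLevel(level)
        log.log(level, "Changing log level to " + log_level)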
Configures the log format.
Arguments: log_type (:obj:`str`) -- log type (error, debug or stream); log_format (:obj:`str`) -- log format, e.g. "Log: %(message)s | Log level: %(levelname)s | Date: %(asctime)s" with datefmt='%m/%d/%Y %I:%M:%S'.

def set_log_format(self, log_type, log_format):
    if log_type not in ('error', 'stream', 'debug'):
        self.log.debug('Log type must be error, stream, or debug')
    else:
        self.default_formatter = logging.Formatter(log_format)
        if log_type == 'error':
            self.error_handler.setFormatter(self.default_formatter)
        elif log_type == 'debug':
            self.debug_handler.setFormatter(self.default_formatter)
        elif log_type == 'stream':
            self.stream_handler.setFormatter(self.default_formatter)
Returns all the projects and tasks available in the `acorn` database directory.
Args: target (str) -- directory to list the projects for; defaults to the configured database directory.
Returns: dict -- keys are project names; values are lists of tasks associated with the project.

def list_tasks(target=None):
    from os import getcwd, chdir
    from glob import glob
    original = getcwd()
    if target is None:  # pragma: no cover
        target = _dbdir()
    chdir(target)
    result = {}
    for filename in glob("*.*.json"):
        project, task = filename.split('.')[0:2]
        if project not in result:
            result[project] = []
        result[project].append(task)
    # Set the working directory back to what it was.
    chdir(original)
    return result
Sets the active project and task. All subsequent logging will be saved to the database with that project and task.
Args: project_ (str) -- active project name; a project can have multiple tasks. task_ (str) -- active task name; logging is separated at the project and task level.

def set_task(project_, task_):
    global project, task
    project = project_
    task = task_
    msg.okay("Set project name to {}.{}".format(project, task), 2)
Returns the :class:`Instance` of the specified object if it is one that we track by default.
Args: obj (object) -- any python object passed as an argument to a method.
Returns: Instance -- if the object is trackable, the Instance of that object; a short string summary for simple values and containers; else None.

def tracker(obj):
    import types as typ
    global oids, uuids
    import six
    untracked = (six.string_types, six.integer_types, float,
                 complex, six.text_type)
    semitrack = (list, dict, set, tuple)
    if six.PY3:  # pragma: no cover
        semitrack = semitrack + (range, filter, map)
    if (isinstance(obj, semitrack) and
            all([isinstance(t, untracked) for t in obj])):
        if len(obj) > 0:
            semiform = "{0} len={1:d} min={2} max={3}"
            return semiform.format(type(obj), len(obj), min(obj), max(obj))
        else:
            semiform = "{0} len={1:d}"
            return semiform.format(type(obj), len(obj))
    elif isinstance(obj, semitrack):
        # We have to run the tracker on each of the elements in the list, set,
        # dict or tuple; this is necessary so that we can keep track of
        # subsequent calls made with unpacked parts of the tuple.
        result = []
        # If we get a list of 10K tuples (like plot points in matplotlib),
        # then this pollutes the database. So, we restrict the maximum size of
        # complex lists to be 5; we track the first 5 objects and then store a
        # summary of the remaining information.
        for o in obj[0:min((len(obj), 5))]:
            track = tracker(o)
            if isinstance(track, Instance):
                result.append(track.uuid)
            else:
                result.append(track)
        if len(obj) > 5:
            result.append("... ({0:d} items)".format(len(obj)))
        return tuple(result)
    elif isinstance(obj, slice):
        return "slice({}, {}, {})".format(obj.start, obj.stop, obj.step)
    elif type(obj) is type:
        return obj.__name__
    elif type(obj) is typ.LambdaType:
        if hasattr(obj, "__fqdn__"):
            # We need to get the actual fqdn of the object *before* it was
            # decorated.
            return obj.__fqdn__
        if six.PY2:
            _code = obj.func_code
        else:  # pragma: no cover
            _code = obj.__code__
        return "lambda ({})".format(', '.join(_code.co_varnames))
    elif type(obj) in [typ.FunctionType, typ.MethodType]:  # pragma: no cover
        return obj.__name__
    elif not isinstance(obj, untracked):
        # For many of the numpy/scipy methods, the result is a tuple of numpy
        # arrays. In that case, we should maintain the tuple structure for
        # descriptive purposes, but still return a tracker.
        oid = id(obj)
        if oid in oids:
            result = oids[oid]
        else:
            result = Instance(oid, obj)
            oids[oid] = result
            uuids[result.uuid] = result
        return result
    else:
        return None
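To make the summary branch concrete, here is a minimal standalone sketch of just the formatting used for flat containers (no Instance tracking, `summarize` is an illustrative name):

    def summarize(obj):
        # mirrors the semitrack formatting used by tracker() above
        if len(obj) > 0:
            return "{0} len={1:d} min={2} max={3}".format(
                type(obj), len(obj), min(obj), max(obj))
        return "{0} len={1:d}".format(type(obj), len(obj))

    print(summarize([3, 1, 2]))  # <class 'list'> len=3 min=1 max=3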
Cleans the specified python `dict` by converting any tuple keys to strings so that they can be serialized by JSON.
Args: d (dict) -- python dictionary to clean up.
Returns: tuple -- (cleaned-up dictionary, mapping from the generated string keys back to the original composite tuple keys).

def _json_clean(d):
    result = {}
    compkeys = {}
    for k, v in d.items():
        if not isinstance(k, tuple):
            result[k] = v
        else:
            # v is a list of entries for instance methods/constructors on the
            # UUID of the key. Instead of using the composite tuple keys, we
            # switch them for a string key built from the tuple's id.
            key = "c.{}".format(id(k))
            result[key] = v
            compkeys[key] = k
    return (result, compkeys)
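For example (the numeric part of the generated key comes from `id()` and will differ from run to run):

    d = {"n": 1, ("some-uuid", "method"): ["entry"]}
    cleaned, compkeys = _json_clean(d)
    # cleaned  -> {"n": 1, "c.140234…": ["entry"]}
    # compkeys -> {"c.140234…": ("some-uuid", "method")}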
Saves the specified image to disk.
Args: byteio (bytes) -- image bytes to save to disk; imgfmt (str) -- used as the extension of the saved file.
Returns: str -- a uuid for the saved image that can be added to the database entry.

def save_image(byteio, imgfmt):
    from os import path, mkdir
    from uuid import uuid4
    ptdir = "{}.{}".format(project, task)
    uuid = str(uuid4())
    # Save the image within the project/task-specific folder.
    idir = path.join(dbdir, ptdir)
    if not path.isdir(idir):
        mkdir(idir)
    ipath = path.join(idir, "{}.{}".format(uuid, imgfmt))
    with open(ipath, 'wb') as f:
        f.write(byteio)
    return uuid
Records the specified entry to the key-value store under the specified entity key.
Args: ekey (str) -- fqdn/uuid of the method/object to store the entry for; entry (dict) -- attributes and values gleaned from the execution; diff (bool) -- when True, the "c" element of `entry` will be diffed against previous entries under the same `ekey` if their method (attribute "m") matches.

def record(ekey, entry, diff=False):
    taskdb = active_db()
    taskdb.record(ekey, entry, diff)
    # The task database save method makes sure that we only save as often as
    # specified in the configuration file.
    taskdb.save()
Logs the object with the specified `uuid` to `self.uuids` if possible.
Args: uuid (str) -- string value of :meth:`uuid.uuid4` for the object.

def log_uuid(self, uuid):
    # We only need to try and describe an object once; if it is already in
    # our database, then just move along.
    if uuid not in self.uuids and uuid in uuids:
        self.uuids[uuid] = uuids[uuid].describe()
Records the specified entry to the key-value store under the specified entity key.
Args: ekey (str) -- fqdn/uuid of the method/object to store the entry for; entry (dict) -- attributes and values gleaned from the execution; diff (bool) -- when True, the "c" element of `entry` will be diffed against previous entries under the same `ekey` if their method (attribute "m") matches.

def record(self, ekey, entry, diff=False):
    if ekey not in self.entities:
        self.entities[ekey] = []
    # See if we need to diff the code to compress it.
    if diff and len(self.entities[ekey]) > 0:
        # Compress the code element of the current entry that we are saving.
        from acorn.logging.diff import cascade, compress
        sequence = [e["c"] for e in self.entities[ekey]
                    if e["m"] == entry["m"]]
        original = cascade(sequence)
        difference = compress(original, entry["c"])
        # Now, overwrite the entry with the compressed version.
        entry["c"] = difference
    self.entities[ekey].append(entry)
    # We also need to make sure we have uuids and origin information stored
    # for any uuids present in the parameter string.
    from uuid import UUID
    uid = None
    if entry["r"] is not None:
        uid = entry["r"]
    elif isinstance(ekey, str):
        # For many methods we don't duplicate the UUID in the returns part
        # because it wastes space. In those cases, the ekey is a UUID.
        try:
            uid = str(UUID(ekey))
        except ValueError:  # pragma: no cover
            pass
    if uid is not None and isinstance(uid, str):
        self.log_uuid(uid)
    # For the markdown and function definitions, we don't have any arguments,
    # so we set that to None to save space.
    if entry["a"] is None:
        return
    for larg in entry["a"]["_"]:
        # We use the constructor to determine if the format of the argument
        # is a valid UUID; if it isn't then we catch the error and keep going.
        if not isinstance(larg, str):
            continue
        try:
            uid = str(UUID(larg))
            self.log_uuid(uid)
        except ValueError:
            # This was obviously not a UUID; we don't need to worry about it,
            # it has a user-readable string instead.
            pass
    # We also need to handle the keyword arguments; these are keyed by name.
    for key, karg in entry["a"].items():
        if key == "_" or not isinstance(karg, str):
            # Skip the positional arguments since we already handled them.
            continue
        try:
            uid = str(UUID(karg))
            self.log_uuid(uid)
        except ValueError:
            pass
Serializes the database file to disk.
Args: force (bool) -- when True, the elapsed time since the last save is ignored and the database is saved anyway (subject to the global :data:`writeable` setting).

def save(self, force=False):
    from time import time
    from datetime import datetime
    # Since the DBs can get rather large, we don't want to save them every
    # single time a method is called. Instead, we only save them at the
    # frequency specified in the global settings file.
    savefreq = TaskDB.get_option("savefreq", 2, int)
    if self.lastsave is not None:
        delta = (datetime.fromtimestamp(time()) -
                 datetime.fromtimestamp(self.lastsave))
        elapsed = int(delta.total_seconds() / 60)
    else:
        elapsed = savefreq + 1
    if elapsed > savefreq or force:
        if not writeable:
            # We still overwrite the lastsave value so that this message
            # doesn't keep getting output for every :meth:`record` call.
            self.lastsave = time()
            msg.std("Skipping database write to disk by setting.", 2)
            return
        import json
        try:
            entities, compkeys = _json_clean(self.entities)
            jdb = {"entities": entities,
                   "compkeys": compkeys,
                   "uuids": self.uuids}
            with open(self.dbpath, 'w') as f:
                json.dump(jdb, f)
        except:  # pragma: no cover
            from acorn.msg import err
            import sys
            raise err("{}: {}".format(*sys.exc_info()[0:2]))
        self.lastsave = time()
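The elapsed-minutes computation above round-trips through datetime objects; assuming `lastsave` holds a `time.time()` float (which the assignments in the method suggest), the same result falls out of plain float arithmetic. A sketch, not the library's code:

    from time import time

    def minutes_since(lastsave):
        # datetime.fromtimestamp(time()) - datetime.fromtimestamp(lastsave)
        # reduces to a plain difference in seconds
        return int((time() - lastsave) / 60)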
Stops all active threads and rejects new tasks being added.
Args: block (bool) -- if True, block until all threads are closed.

def stop(self, block=True):
    self._stop = True
    # Remove tasks in the queue
    self.empty_queue()
    # Feed all active threads the DoNothing function. Because self._stop is
    # True, each thread will process at most one of the DoNothing functions,
    # so it is ensured that all .get calls are triggered.
    for _ in range(self.threads_active()):
        self._queue.put(SetPrio(target=DoNothing))
    if block:
        # Block until all threads are closed
        self.join()
        # Remove any leftover DoNothing functions (can only be reliably done
        # when all threads are closed)
        self.empty_queue()
Get the appropriate supervisor to use and pre-apply the function.
Args: func -- a function.

def get_supervisor(func: types.AnyFunction) -> types.Supervisor:
    if not callable(func):
        raise TypeError("func is not callable")
    if asyncio.iscoroutinefunction(func):
        supervisor = _async_supervisor
    else:
        supervisor = _sync_supervisor
    return functools.partial(supervisor, func)
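A hypothetical caller, to show the dispatch (`fetch` and `compute` are illustrative names, not part of the package):

    import asyncio

    async def fetch():
        await asyncio.sleep(0)
        return 42

    def compute():
        return 42

    # get_supervisor inspects the function and pre-applies it with
    # functools.partial, so the caller only supplies the animation arguments
    async_sup = get_supervisor(fetch)    # wraps _async_supervisor
    sync_sup = get_supervisor(compute)   # wraps _sync_supervisor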
Supervisor for running an animation with an asynchronous function.
Args: func -- a function to be run alongside an animation; animation_ -- an infinite generator that produces strings for the animation; step -- seconds between each animation frame; *args -- arguments for func; **kwargs -- keyword arguments for func.
Returns: the result of func(*args, **kwargs).
Raises: any exception that is thrown when executing func.

async def _async_supervisor(func, animation_, step, *args, **kwargs):
    with ThreadPoolExecutor(max_workers=2) as pool:
        with _terminating_event() as event:
            pool.submit(animate_cli, animation_, step, event)
            result = await func(*args, **kwargs)
    return result
Analyzes the result of a generic fit operation performed by `sklearn`.
Args: fqdn (str) -- fully-qualified name of the method that was called; result -- result of calling the method with `fqdn`; argl (tuple) -- positional arguments passed to the method call; argd (dict) -- keyword arguments passed to the method call.

def fit(fqdn, result, *argl, **argd):
    # Check the arguments to see what kind of data we are working with, then
    # choose the appropriate function below to return the analysis dictionary.
    # The first positional argument will be the instance of the machine that
    # was used. Check its name against a list.
    global _machines
    out = None
    if len(argl) > 0:
        machine = argl[0]
        # We save pointers to the machine that was just fit so that we can
        # figure out later what training data was used for analysis purposes.
        key = id(machine)
        _machines[key] = (machine, argl[0], argl[1])
        if isclassifier(machine):
            out = classify_fit(fqdn, result, *argl, **argd)
        elif isregressor(machine):
            out = regress_fit(fqdn, result, *argl, **argd)
    return out
Analyzes the result of a generic predict operation performed by `sklearn`.
Args: fqdn (str) -- fully-qualified name of the method that was called; result -- result of calling the method with `fqdn`; argl (tuple) -- positional arguments passed to the method call; argd (dict) -- keyword arguments passed to the method call.

def predict(fqdn, result, *argl, **argd):
    # Check the arguments to see what kind of data we are working with, then
    # choose the appropriate function below to return the analysis dictionary.
    out = None
    if len(argl) > 0:
        machine = argl[0]
        if isclassifier(machine):
            out = classify_predict(fqdn, result, None, *argl, **argd)
        elif isregressor(machine):
            out = regress_predict(fqdn, result, None, *argl, **argd)
    return out
Performs the generic fit tests that are common to both classifier and regressor; uses `scorer` to score the predicted values given by the machine when tested against its training set.
Args: scorer (function) -- called on the result of `machine.predict(Xtrain, ytrain)`.

def _generic_fit(fqdn, result, scorer, yP=None, *argl, **argd):
    out = None
    if len(argl) > 0:
        machine = argl[0]
        out = {}
        if hasattr(machine, "best_score_"):
            out["score"] = machine.best_score_
        # With fitting it is often useful to know how well the fitting set was
        # matched (by trying to predict a score on it). We can do this
        # automatically and show the result to the user.
        yL = _do_auto_predict(*argl[0:2])
        yscore = scorer(fqdn, yL, yP, *argl, **argd)
        if yscore is not None:
            out.update(yscore)
    return out
Returns the percent match for the specified prediction call; requires that the data was split before using an analyzed method.
Args: result -- predicted values; out (dict) -- output dictionary to save the result to; yP -- true values, looked up from the recorded split when None.

def _percent_match(result, out, yP=None, *argl):
    if len(argl) > 1:
        if yP is None:
            Xt = argl[1]
            key = id(Xt)
            if key in _splits:
                yP = _splits[key][3]
        if yP is not None:
            out["%"] = round(1. - sum(abs(yP - result)) / float(len(result)), 3)
Register a new Subscription on this collection's parent object.
Args: callback_url (str) -- URI of an active endpoint which can receive notifications.
Returns: a round.Subscription object if successful.

def create(self, callback_url):
    resource = self.resource.create({'subscribed_to': 'address',
                                     'callback_url': callback_url})
    subscription = self.wrap(resource)
    self.add(subscription)
    return subscription
Convert one line from the extended log to a dict.
Args: line (str) -- line which will be converted.
Returns: dict with ``timestamp``, ``command``, ``username`` and ``path`` keys.
Note: a typical line looks like ``/home/ftp/xex/asd bsd.dat, xex, STOR, 1398351777``. The filename may contain the ``,`` character, so the line is ``rsplit`` from the end to the beginning.

def _parse_line(line):
    line, timestamp = line.rsplit(",", 1)
    line, command = line.rsplit(",", 1)
    path, username = line.rsplit(",", 1)
    return {
        "timestamp": timestamp.strip(),
        "command": command.strip(),
        "username": username.strip(),
        "path": path,
    }
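Running the typical line from the note through the parser shows why the right-to-left split matters: the comma inside the filename stays in ``path``.

    _parse_line("/home/ftp/xex/asd bsd.dat, xex, STOR, 1398351777")
    # -> {"timestamp": "1398351777", "command": "STOR",
    #     "username": "xex", "path": "/home/ftp/xex/asd bsd.dat"}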
Process the extended ProFTPD log.
Args: file_iterator (file) -- any file-like iterator for reading the log or stdin (see :func:`_read_stdin`).
Yields: ImportRequest -- with each import.

def process_log(file_iterator):
    for line in file_iterator:
        if "," not in line:
            continue
        parsed = _parse_line(line)
        if not parsed["command"].upper() in ["DELE", "DEL"]:
            continue
        # don't react to anything other than the trigger in the form of a
        # deleted "lock" file
        if os.path.basename(parsed["path"]) != settings.LOCK_FILENAME:
            continue
        # react only to a lock file in the home directory
        dir_name = os.path.dirname(parsed["path"])
        if settings.LOCK_ONLY_IN_HOME:
            if dir_name != settings.DATA_PATH + parsed["username"]:
                continue
        # deleted user
        if not os.path.exists(os.path.dirname(parsed["path"])):
            continue
        # old record, which doesn't need to be parsed again
        if os.path.exists(parsed["path"]):
            continue
        logger.info(
            "Request for processing from user '%s'." % parsed["username"]
        )
        yield process_import_request(
            username=parsed["username"],
            path=os.path.dirname(parsed["path"]),
            timestamp=parsed["timestamp"],
            logger_handler=logger
        )
Resolve prefix to a namespaceURI. If None or an empty str, return the default namespace or None.
Parameters: celt -- element node; prefix -- xmlns:prefix, or empty str or None.

def _resolve_prefix(celt, prefix):
    namespaceURI = None
    while _is_element(celt):
        if prefix:
            namespaceURI = _find_xmlns_prefix(celt, prefix)
        else:
            namespaceURI = _find_default_namespace(celt)
        if namespaceURI:
            break
        celt = celt.parentNode
    else:
        if prefix:
            raise EvaluateException('cant resolve xmlns:%s' % prefix)
    return namespaceURI
Return the index of a fieldset in the ``fieldsets`` list.
Args: fieldsets (list) -- the original ``fieldsets`` list; index_or_name (int or str) -- the value of the reference element, or directly its numeric index.
Returns: (int) the index of the fieldset in the ``fieldsets`` list.

def get_fieldset_index(fieldsets, index_or_name):
    if isinstance(index_or_name, six.integer_types):
        return index_or_name
    for key, value in enumerate(fieldsets):
        if value[0] == index_or_name:
            return key
    raise KeyError("Key not found: '{}'.".format(index_or_name))
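For instance, with Django-style fieldsets (illustrative data):

    fieldsets = [
        ("General", {"fields": ["name"]}),
        ("Advanced", {"fields": ["slug"]}),
    ]
    get_fieldset_index(fieldsets, "Advanced")  # -> 1
    get_fieldset_index(fieldsets, 0)           # -> 0 (numeric index passes through)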
Return the index of an element in the list.
Args: lst (list) -- the list; index_or_name (int or str) -- the value of the reference element, or directly its numeric index.
Returns: (int) the index of the element in the list.

def get_list_index(lst, index_or_name):
    if isinstance(index_or_name, six.integer_types):
        return index_or_name
    return lst.index(index_or_name)
Returns n unique and "evenly" spaced colors for the backgrounds of the projects.
Args: n (int) -- the number of unique colors wanted.
Returns: colors (list of str) -- the colors in hex form.

def _get_colors(n):
    import matplotlib.pyplot as plt
    from matplotlib.colors import rgb2hex as r2h
    from numpy import linspace
    cols = linspace(0.05, .95, n)
    cmap = plt.get_cmap('nipy_spectral')
    return [r2h(cmap(i)) for i in cols]
Returns a dictionary in which each project is a key and the tasks are stored as a list within that dictionary element.
Args: path (str) -- the path to the folder containing the *.json files.
Returns: projects (dict) -- a dictionary in which each project is a key containing a list of its tasks.

def _make_projcet_list(path):
    from collections import OrderedDict
    from matplotlib.colors import LinearSegmentedColormap
    from matplotlib.colors import rgb2hex as r2h
    from numpy import linspace
    proj = []
    projects = OrderedDict()
    file_list = os.listdir(path)
    for files in file_list:
        if (files.split(".")[0] not in proj and 'json' in files
                and "#" not in files and "~" not in files):
            proj.append(files.split(".")[0])
    # Get the background color for each project.
    colors = _get_colors(len(proj))
    p_c = 0
    for p in proj:
        tasks = OrderedDict()
        temp = [x.split(".")[1] for x in file_list
                if p in x and "#" not in x and "~" not in x]
        cmspace = linspace(0.95, 0.25, len(temp))
        cm = LinearSegmentedColormap.from_list(
            "acorn.{}".format(p), ['#ffffff', colors[p_c]],
            N=max((len(temp), 25)))
        hues = [r2h(cm(cmi)) for cmi in cmspace]
        h_c = 0
        for t in temp:
            tasks[t] = [hues[h_c], p + "." + t + ".json"]
            h_c += 1
        tasks["hex_color"] = colors[p_c]
        projects[p] = tasks
        p_c += 1
    return projects
Internal function that takes a list of (prefix, namespace uri) tuples and generates a SPARQL PREFIX string.
Args: namespaces (list) -- list of tuples; defaults to BIBFRAME and Schema.org.
Returns: string.

def build_prefixes(namespaces=None):
    if namespaces is None:
        namespaces = [('bf', str(BIBFRAME)), ('schema', str(SCHEMA_ORG))]
    output = "PREFIX {}: <{}>\n".format(namespaces[0][0], namespaces[0][1])
    if len(namespaces) == 1:
        return output
    for namespace in namespaces[1:]:
        output += "PREFIX {}: <{}>\n".format(namespace[0], namespace[1])
    return output
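For example, with an explicit namespace list (so the BIBFRAME/Schema.org defaults are not needed):

    build_prefixes([('dc', 'http://purl.org/dc/elements/1.1/'),
                    ('foaf', 'http://xmlns.com/foaf/0.1/')])
    # -> 'PREFIX dc: <http://purl.org/dc/elements/1.1/>\n'
    #    'PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n'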
Function that takes a subject and an existing graph, and returns a new graph with all predicates and objects of the existing graph copied over, with subject as the new subject.
Args: subject (rdflib.URIRef) -- a URIRef subject; existing_graph (rdflib.Graph) -- an rdflib.Graph.
Returns: rdflib.Graph.

def copy_graph(subject, existing_graph):
    new_graph = rdflib.Graph()
    for predicate, object_ in existing_graph.predicate_objects():
        new_graph.add((subject, predicate, object_))
    return new_graph
Initializes a Repository object.
Args: app (Flask) -- Flask app, default is None; base_url (str) -- base url for Fedora Commons, defaults to localhost:8080; namespaces (list) -- list of namespace tuples of (prefix, uri) for each namespace in Fedora.

def __init__(self,
             app=None,
             base_url='http://localhost:8080',
             namespaces=DEFAULT_NAMESPACES):
    self.app = app
    self.namespaces = namespaces
    self.base_url = None
    if app is not None:
        self.init_app(app)
        if 'FEDORA_BASE_URL' in app.config:
            self.base_url = app.config.get('FEDORA_BASE_URL')
    if self.base_url is None:
        self.base_url = base_url
    # Remove a trailing forward slash
    if self.base_url.endswith("/"):
        self.base_url = self.base_url[:-1]
    self.transaction = []
Internal method that takes an RDF graph and cycles through the RDFS label and BIBFRAME authorizedAccessPoint triples to see if the graph's entity already exists in Fedora. As other searchable unique triples are added from other vocabularies, they should be added to this method.
Args: subject (rdflib.URIRef) -- RDF subject URI; graph (rdflib.Graph) -- RDF graph.
Returns: graph (rdflib.Graph) -- existing RDF graph in Fedora, or None.

def __dedup__(self, subject, graph):
    if graph is None:
        return
    for uri in Repository.DEFAULT_ID_URIS:
        # Checks for duplicates
        for obj_uri in graph.objects(subject=subject, predicate=uri):
            sparql_url = urllib.parse.urljoin(self.base_url, "rest/fcr:sparql")
            sparql_template = Template()
            sparql_query = sparql_template.substitute(uri=uri, obj_uri=obj_uri)
            search_request = urllib.request.Request(
                sparql_url, data=sparql_query.encode())
            search_request.add_header("Accept", "text/turtle")
            search_request.add_header("Content-Type",
                                      "application/sparql-query")
            try:
                search_response = urllib.request.urlopen(search_request)
                if search_response.code < 400:
                    return rdflib.Graph().parse(
                        data=search_response.read(), format='turtle')
            except urllib.error.HTTPError:
                print("Error with sparql query:\n{}".format(sparql_query))
Initializes a Flask app object for the extension.
Args: app (Flask) -- Flask app.

def init_app(self, app):
    app.config.setdefault('FEDORA_BASE_URL', 'http://localhost:8080')
    if hasattr(app, 'teardown_appcontext'):
        app.teardown_appcontext(self.teardown)
    else:
        app.teardown_request(self.teardown)
Method attempts to connect to the REST servers of the Fedora Commons repository using the optional data parameter.
Args: fedora_url (string) -- Fedora URL; data (dict) -- data to pass through to the REST endpoint; method (str) -- REST method, defaults to GET.
Returns: result (string) -- response from Fedora.

def connect(self, fedora_url, data=None, method='Get'):
    if data is None:
        data = {}
    if not fedora_url.startswith("http"):
        fedora_url = urllib.parse.urljoin(self.base_url, fedora_url)
    request = urllib.request.Request(fedora_url, method=method)
    request.add_header('Accept', 'text/turtle')
    request.add_header('Content-Type', 'text/turtle')
    if len(data) > 0:
        request.data = data
    try:
        response = urllib.request.urlopen(request)
    except urllib.error.URLError as err:
        if hasattr(err, 'reason'):
            print("failed to reach server at {} with {} method".format(
                fedora_url, request.method))
            print("Reason: ", err.reason)
            print("Data: ", data)
        elif hasattr(err, 'code'):
            print("Server error {}".format(err.code))
        raise err
    return response
Method takes an entity uri and attempts to return the Fedora object as JSON-LD.
Args: entity_url (str) -- Fedora Commons URL of the entity; context -- returns JSON-LD with context, default is None.
Returns: str -- JSON-LD of the Fedora object.

def as_json(self, entity_url, context=None):
    try:
        urllib.request.urlopen(entity_url)
    except urllib.error.HTTPError:
        raise ValueError("Cannot open {}".format(entity_url))
    entity_graph = self.read(entity_url)
    entity_json = json.loads(
        entity_graph.serialize(format='json-ld', context=context).decode())
    return json.dumps(entity_json)
Method deletes a Fedora object in the repository.
Args: uri (str) -- URI of the Fedora object.

def delete(self, uri):
    try:
        self.connect(uri, method='DELETE')
        return True
    except urllib.error.HTTPError:
        return False
Method returns True if the entity exists in the repository, False otherwise.
Args: uri (str) -- entity URI.
Returns: bool.

def exists(self, uri):
    # entity_uri = "/".join([self.base_url, entity_id])
    try:
        urllib.request.urlopen(uri)
        return True
    except urllib.error.HTTPError:
        return False
Method inserts a new entity's property in the Fedora4 repository.
Args: entity_id (string) -- unique ID of the Fedora object; property_uri (string) -- URI of the property; value -- value of the property, can be a literal or a URI reference.
Returns: boolean -- True if successfully changed in Fedora, False otherwise.

def insert(self, entity_id, property_uri, value):
    if not entity_id.startswith("http"):
        entity_uri = urllib.parse.urljoin(self.base_url, entity_id)
    else:
        entity_uri = entity_id
    if entity_uri.endswith("/"):
        entity_uri = entity_uri[:-1]
    if not entity_id.endswith("fcr:metadata"):
        entity_uri = "/".join([entity_uri, "fcr:metadata"])
    if not self.exists(entity_id):
        self.create(entity_id)
    sparql_template = Template()
    sparql = sparql_template.substitute(
        prefix=build_prefixes(self.namespaces),
        entity=entity_uri,
        prop_uri=property_uri,
        value_str=self.__value_format__(value))
    update_request = urllib.request.Request(
        entity_uri,
        data=sparql.encode(),
        method='PATCH',
        headers={'Content-Type': 'application/sparql-update'})
    try:
        response = urllib.request.urlopen(update_request)
    except urllib.error.HTTPError:
        print("Error trying patch {}, sparql=\n{}".format(entity_uri, sparql))
        return False
    if response.code < 400:
        return True
    return False
Method takes a uri and creates an RDF graph from the Fedora repository.
Args: uri (str) -- Fedora URI.
Returns: rdflib.Graph.

def read(self, uri):
    read_response = self.connect(uri)
    fedora_graph = rdflib.Graph().parse(
        data=read_response.read(), format='turtle')
    return fedora_graph
Method removes a triple for the given subject.
Args: entity_id (string) -- Fedora object ID, ideally the URI of the subject; property_uri (string) -- URI of the property; value (string) -- value of the property.
Returns: boolean -- True if the triple was removed from the object.

def remove(self, entity_id, property_uri, value):
    if not entity_id.startswith("http"):
        entity_uri = urllib.parse.urljoin(self.base_url, entity_id)
    else:
        entity_uri = entity_id
    sparql_template = Template()
    sparql = sparql_template.substitute(
        prefix=build_prefixes(self.namespaces),
        entity=entity_uri,
        prop_name=property_uri,
        value_str=self.__value_format__(value))
    delete_property_request = urllib.request.Request(
        entity_uri,
        data=sparql.encode(),
        method='PATCH',
        headers={'Content-Type': 'application/sparql-update'})
    response = urllib.request.urlopen(delete_property_request)
    if response.code < 400:
        return True
    return False
Method replaces a triple for the given entity/subject. The property name is from the schema.org vocabulary.
Args: entity_id (string) -- unique ID of the Fedora object; property_name (string) -- prefix and property name, i.e. schema:name; old_value (string) -- literal or URI of the old value; value (string) -- literal or new value.

def replace(self, entity_id, property_name, old_value, value):
    if not entity_id.startswith("http"):
        entity_uri = '/'.join([self.base_url, self.transaction, entity_id])
    else:
        entity_uri = entity_id
    sparql_template = Template()
    sparql = sparql_template.substitute(
        prefix=build_prefixes(self.namespaces),
        entity=entity_uri,
        prop_name=property_name,
        old_value=self.__value_format__(old_value),
        new_value=self.__value_format__(value))
    update_request = urllib.request.Request(
        entity_uri,
        data=sparql.encode(),
        method='PATCH',
        headers={'Content-Type': 'application/sparql-update'})
    response = urllib.request.urlopen(update_request)
    if response.code < 400:
        return True
    return False
DEPRECATED. Method takes a query term, searches the Fedora repository using the SPARQL search endpoint, and returns an RDF graph of the search results.
Args: query_term (str) -- string to search the repository for.
Returns: rdflib.Graph().

def search(self, query_term):
    fedora_search_url = "/".join([self.base_url, 'rest', 'fcr:search'])
    fedora_search_url = "{}?{}".format(
        fedora_search_url, urllib.parse.urlencode({"q": query_term}))
    search_request = urllib.request.Request(fedora_search_url, method='GET')
    search_request.add_header('Accept', 'text/turtle')
    try:
        search_response = urllib.request.urlopen(search_request)
    except urllib.error.URLError as error:
        raise error
    fedora_results = rdflib.Graph().parse(
        data=search_response.read(), format='turtle')
    return fedora_results
Make sure that the heading falls in the range [0, 359].
Args: heading -- base heading.
Returns: corrected heading.

def _normalize_direction(heading: int) -> int:
    while heading > 359:
        heading = int(heading - 359)
    while heading < 0:
        heading = int(heading + 359)
    return heading
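Note that the wrap step is 359, not 360, so the correction is slightly offset from a true modulo. Worked examples:

    _normalize_direction(370)  # 370 - 359 -> 11
    _normalize_direction(-10)  # -10 + 359 -> 349
    _normalize_direction(359)  # unchanged -> 359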
Creates a variation from a base value.
Args: mean -- base value; sigma -- gaussian sigma.
Returns: random value.

def _gauss(mean: int, sigma: int) -> int:
    return int(random.gauss(mean, sigma))
Creates a variation in wind speed.
Args: base_speed -- base wind speed; sigma -- sigma value for gaussian variation.
Returns: random wind speed.

def _randomize_speed(base_speed: int, sigma: int = None) -> int:
    if sigma is None:
        int_sigma = int(base_speed / 4)
    else:
        int_sigma = sigma
    val = MissionWeather._gauss(base_speed, int_sigma)
    if val < 0:
        return 0
    return min(val, 50)
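A quick, seeded sketch of the clamping behaviour (standalone; plain `random.gauss` stands in for `MissionWeather._gauss`):

    import random

    random.seed(1)
    val = int(random.gauss(45, 11))  # base_speed=45 -> int_sigma=45//4=11
    print(max(0, min(val, 50)))      # never below 0, capped at 50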
Creates a variation in direction.
Args: base_heading -- base direction; sigma -- sigma value for gaussian variation.
Returns: random direction.

def _randomize_direction(base_heading, sigma) -> int:
    val = MissionWeather._gauss(base_heading, sigma)
    val = MissionWeather._normalize_direction(val)
    return val
Applies weather to an opened Miz file (the mission will be mutated).
Args: miz -- source miz.
Returns: True.

def apply_to_miz(self, miz):
    report = ['Building mission with weather:']
    miz.mission.weather.wind_at_ground_level_dir = self.wind_at_ground_level_dir
    miz.mission.weather.wind_at_ground_level_speed = self.wind_at_ground_level_speed
    miz.mission.weather.wind_at2000_dir = self._randomize_direction(self.wind_dir, 40)
    miz.mission.weather.wind_at2000_speed = self._randomize_speed(
        5 + self.wind_at_ground_level_speed * 2)
    miz.mission.weather.wind_at8000_dir = self._randomize_direction(self.wind_dir, 80)
    miz.mission.weather.wind_at8000_speed = self._randomize_speed(
        10 + self.wind_at_ground_level_speed * 3)
    miz.mission.weather.turbulence_at_ground_level = self.turbulence
    _ground = f'{miz.mission.weather.wind_at_ground_level_dir}/{miz.mission.weather.wind_at_ground_level_speed}'
    _at2000 = f'{miz.mission.weather.wind_at2000_dir}/{miz.mission.weather.wind_at2000_speed}'
    _at8000 = f'{miz.mission.weather.wind_at8000_dir}/{miz.mission.weather.wind_at8000_speed}'
    _turbulence = f'{miz.mission.weather.turbulence_at_ground_level}'
    wind = f'Wind:' \
           f'\n\tGround: {_ground}' \
           f'\n\t2000m: {_at2000}' \
           f'\n\t8000m: {_at8000}' \
           f'\n\tTurbulence: {_turbulence}'
    report.append(wind)
    miz.mission.weather.atmosphere_type = 0
    miz.mission.weather.qnh = self.qnh
    report.append(f'Atmosphere type: {miz.mission.weather.atmosphere_type}')
    report.append(f'QNH: {miz.mission.weather.qnh}')
    miz.mission.weather.visibility = self.visibility
    if self.fog_vis:
        miz.mission.weather.fog_thickness = 1000
        miz.mission.weather.fog_visibility = self.fog_vis
        miz.mission.weather.fog_enabled = True
    else:
        miz.mission.weather.fog_enabled = False
        miz.mission.weather.fog_visibility = 0
        miz.mission.weather.fog_thickness = 0
    visibility = f'Visibility: {miz.mission.weather.visibility}' \
                 f'\n\tFog: {"yes" if miz.mission.weather.fog_enabled else "no"}' \
                 f'\n\tFog thickness: {miz.mission.weather.fog_thickness}' \
                 f'\n\tFog visibility: {miz.mission.weather.fog_visibility}'
    report.append(visibility)
    miz.mission.weather.temperature = self.temperature
    report.append(f'Temperature: {self.temperature}°C')
    miz.mission.weather.cloud_density = max(self.force_cloud_density,
                                            self.cloud_density)
    miz.mission.weather.cloud_thickness = self.cloud_thickness
    miz.mission.weather.cloud_base = self.cloud_base
    miz.mission.weather.precipitations = self.precipitations
    clouds = f'Clouds:' \
             f'\n\tClouds density: {miz.mission.weather.cloud_density}' \
             f'\n\tClouds thickness: {miz.mission.weather.cloud_thickness}' \
             f'\n\tClouds base: {miz.mission.weather.cloud_base}' \
             f'\n\tPrecipitations: {miz.mission.weather.precipitations}'
    report.append(clouds)
    LOGGER.debug('applying weather: %s', report)
    return True
Edit an opened MIZ file and sets the time and date and the weather Args: infile: source file outfile: output file (will default to source file) metar: metar string, ICAO or object to apply time: time string to apply (YYYYMMDDHHMMSS) min_wind: minimum wind max_wind: maximum wind Returns: String containing error
def edit_miz( # noqa: C901 infile: str, outfile: str = None, metar: typing.Union[str, Metar] = None, time: str = None, min_wind: int = 0, max_wind: int = 40 ) -> str: # noinspection SpellCheckingInspection if outfile is None: LOGGER.debug('editing in place: %s', infile) outfile = infile else: LOGGER.debug('editing miz file: %s -> %s', infile, outfile) mission_weather = mission_time = None if metar: error, metar = emiz.weather.custom_metar.CustomMetar.get_metar(metar) if error: return error mission_weather = emiz.weather.mission_weather.MissionWeather(metar, min_wind=min_wind, max_wind=max_wind) if time: try: mission_time = MissionTime.from_string(time) except ValueError: return f'badly formatted time string: {time}' if not mission_weather and not mission_time: return 'nothing to do!' with Miz(infile) as miz: if mission_weather: LOGGER.debug('applying MissionWeather') if not mission_weather.apply_to_miz(miz): return 'error while applying METAR to mission' if mission_time: LOGGER.debug('applying MissionTime') if not mission_time.apply_to_miz(miz): return 'error while setting time on mission' try: miz.zip(outfile) return '' except OSError: return f'permission error: cannot edit "{outfile}"; maybe it is in use ?'
1,112,613
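A hypothetical call to `edit_miz`; the paths and METAR string are placeholders, and the time string follows the YYYYMMDDHHMMSS format from the docstring.

error = edit_miz(
    infile='mission.miz',
    outfile='mission_edited.miz',
    metar='UGTB 211300Z 13014KT CAVOK 33/07 Q1012 NOSIG',
    time='20180521143000',  # YYYYMMDDHHMMSS
)
if error:
    print(f'editing failed: {error}')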
Grab a type definition. Returns a typecode class definition, not an instance, because the facets (name, minOccurs, maxOccurs) must be provided by the caller.

    Parameters:
        namespaceURI -- namespace of the type definition.
        name -- name of the type definition.
        lazy -- if True, wrap the class in a _Mirage for lazy resolution.
def getTypeDefinition(cls, namespaceURI, name, lazy=False): klass = cls.types.get((namespaceURI, name), None) if lazy and klass is not None: return _Mirage(klass) return klass
1,112,748
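A lookup sketch; the namespace and names are placeholders, and the registry class name `SchemaInstanceType` is taken from the `getSubstituteType` entry below.

klass = SchemaInstanceType.getTypeDefinition('urn:example', 'MyType')
if klass is not None:
    # facets are supplied at instantiation; the (namespace, name) tuple
    # form mirrors the call made in getSubstituteType
    typecode = klass(('urn:example', 'myElement'))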
Grab an element declaration. Returns a cached typecode instance for a global element, or the typecode class definition for an element reference. An element reference has its own facets and is local, so it is not cached.

    Parameters:
        namespaceURI -- namespace of the element declaration.
        name -- name of the element declaration.
        isref -- if this is an element reference, return the class definition.
        lazy -- if True, wrap a class result in a _Mirage for lazy resolution.
def getElementDeclaration(cls, namespaceURI, name, isref=False, lazy=False):
        key = (namespaceURI, name)
        if isref:
            # an element reference carries its own facets: return the class,
            # never a cached instance
            klass = cls.elements.get(key, None)
            if klass is not None and lazy is True:
                return _Mirage(klass)
            return klass

        # global elements are instantiated once and cached
        typecode = cls.element_typecode_cache.get(key, None)
        if typecode is None:
            tcls = cls.elements.get(key, None)
            if tcls is not None:
                typecode = cls.element_typecode_cache[key] = tcls()
                typecode.typed = False

        return typecode
1,112,749
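A sketch of the two lookup modes; the namespace and element name are placeholders.

# Global element: a cached typecode instance (typed=False) is returned.
tc = SchemaInstanceType.getElementDeclaration('urn:example', 'myElement')

# Element reference: the class itself, here wrapped in _Mirage via lazy=True.
klass = SchemaInstanceType.getElementDeclaration(
    'urn:example', 'myElement', isref=True, lazy=True)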
If xsi:type does not match the instance type attribute, check whether it is a legitimate derived-type substitution. Does NOT return the element's declared type; returns the substitute typecode instead.

    Parameters:
        elt -- the DOM element being parsed
        ps -- the ParsedSoap object.
def getSubstituteType(self, elt, ps):
        pyclass = SchemaInstanceType.getTypeDefinition(*self.type)
        if pyclass is None:
            raise EvaluateException(
                'No Type registered for xsi:type=(%s, %s)' %
                (self.type[0], self.type[1]), ps.Backtrace(elt))

        # resolve the xsi:type attribute on the element to a registered class
        typeName = _find_type(elt)
        prefix, typeName = SplitQName(typeName)
        uri = ps.GetElementNSdict(elt).get(prefix)
        subclass = SchemaInstanceType.getTypeDefinition(uri, typeName)
        if subclass is None:
            raise EvaluateException(
                'No registered xsi:type=(%s, %s), substitute for xsi:type=(%s, %s)' %
                (uri, typeName, self.type[0], self.type[1]),
                ps.Backtrace(elt))

        # a substitute is only legal if it derives from the declared type
        if not issubclass(subclass, pyclass):
            raise TypeError(
                'Substitute Type (%s, %s) is not derived from %s' %
                (uri, typeName, pyclass), ps.Backtrace(elt))

        return subclass((self.nspname, self.pname))
1,112,752
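The substitution rule in isolation, as a runnable stand-alone sketch: a substitute typecode is accepted only when its class derives from the declared one.

class BaseTC(object):
    pass

class DerivedTC(BaseTC):
    pass

assert issubclass(DerivedTC, BaseTC)      # legal substitution
assert not issubclass(BaseTC, DerivedTC)  # would raise TypeError above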
Return a wrapper for pyobj with its typecode attribute set.

    Parameters:
        pyobj -- instance of a built-in (immutable) type
        what -- typecode describing the data
def WrapImmutable(cls, pyobj, what):
        d = cls.types_dict
        if type(pyobj) is bool:
            # bool can't be subclassed, so route it through the int wrapper
            pyclass = d[int]
        elif type(pyobj) in d:
            pyclass = d[type(pyobj)]
        else:
            raise TypeError('Expecting a built-in type in %s (got %s).' % (
                d.keys(), type(pyobj)))

        # rebuild the value as an attribute-capable subclass instance
        newobj = pyclass(pyobj)
        newobj.typecode = what
        return newobj
1,112,761
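The wrapping trick in isolation, runnable stand-alone: immutable built-ins reject attributes, so the value is rebuilt as a trivial subclass instance; bool is routed through the int wrapper above because bool cannot be subclassed.

class Iint(int):
    pass  # attribute-capable stand-in for a registered wrapper class

value = Iint(42)
value.typecode = object()  # placeholder for a real typecode instance
assert value == 42 and hasattr(value, 'typecode')

try:
    (42).typecode = object()
except AttributeError:
    pass  # plain built-ins cannot carry attributes, hence the wrapper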
Checks parameters such as codon_positions, aminoacids and degenerate, and returns the required sequence as a string.

    Parameters:
        seq_record (SeqRecordExpanded object):
        codon_positions (str): None, 1st, 2nd, 3rd, 1st-2nd or ALL.
        aminoacids (boolean): if True, return the translated sequence.
        degenerate (str): optional method passed through to seq_record.degenerate().

    Returns:
        Namedtuple containing ``seq (str)`` and ``warning (str)``.
def get_seq(seq_record, codon_positions, aminoacids=False, degenerate=None): Sequence = namedtuple('Sequence', ['seq', 'warning']) if codon_positions not in [None, '1st', '2nd', '3rd', '1st-2nd', 'ALL']: raise WrongParameterFormat("`codon_positions` argument should be any of the following" ": 1st, 2nd, 3rd, 1st-2nd or ALL") if aminoacids: aa = seq_record.translate() if '*' in aa: warning = "Gene {0}, sequence {1} contains stop codons '*'".format(seq_record.gene_code, seq_record.voucher_code) else: warning = None return Sequence(seq=aa, warning=warning) if degenerate: return Sequence(seq=seq_record.degenerate(degenerate), warning=None) if codon_positions == '1st': return Sequence(seq=seq_record.first_codon_position(), warning=None) elif codon_positions == '2nd': return Sequence(seq=seq_record.second_codon_position(), warning=None) elif codon_positions == '3rd': return Sequence(seq=seq_record.third_codon_position(), warning=None) elif codon_positions == '1st-2nd': return Sequence(seq=seq_record.first_and_second_codon_positions(), warning=None) else: # None and ALL return Sequence(seq=str(seq_record.seq), warning=None)
1,112,786
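An illustrative call; `record` stands in for a SeqRecordExpanded instance carrying the gene_code and voucher_code attributes used above.

result = get_seq(record, codon_positions='1st-2nd')
if result.warning:
    print(result.warning)
sequence = result.seq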
Creates the dataset header for NEXUS files from ``#NEXUS`` to ``MATRIX``.

    Parameters:
        data (namedtuple): with necessary info for dataset creation (number_taxa, number_chars).
        file_format (str): TNT, PHYLIP, NEXUS, FASTA or MEGA.
        aminoacids (boolean): If ``aminoacids is True`` the header will show ``DATATYPE=PROTEIN``, otherwise it will be ``DNA``.
def make_dataset_header(data, file_format, aminoacids):
    if aminoacids:
        datatype = 'PROTEIN'
    else:
        datatype = 'DNA'

    if file_format in ['NEXUS', 'PHYLIP', 'FASTA']:
        # header template per the docstring (#NEXUS .. MATRIX); the exact
        # wording of the FORMAT line is an assumption
        header = """#NEXUS

BEGIN DATA;
DIMENSIONS NTAX={0} NCHAR={1};
FORMAT INTERLEAVE DATATYPE={2} MISSING=? GAP=-;
MATRIX
""".format(data.number_taxa, data.number_chars, datatype)
    elif file_format == 'MEGA':
        return "#MEGA\n!TITLE title;"
    else:  # file_format: TNT
        if aminoacids:
            molecule_type = "prot"
        else:
            molecule_type = "dna"
        # standard TNT xread header; the exact wording is an assumption
        header = """nstates {0};
xread
{1} {2}
""".format(molecule_type, data.number_chars, data.number_taxa)
    return header.strip()
1,112,789
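Since only number_taxa and number_chars are read from `data`, a namedtuple is enough to exercise the function; the counts are made up.

from collections import namedtuple

Data = namedtuple('Data', ['number_taxa', 'number_chars'])
print(make_dataset_header(Data(number_taxa=10, number_chars=1674),
                          file_format='NEXUS', aminoacids=False))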
Handles decoding of the CSV `data`. Args: data (str): Data which will be decoded. Returns: dict: Dictionary with decoded data.
def decode(data):
    # try to guess the dialect of the csv file
    dialect = None
    try:
        dialect = csv.Sniffer().sniff(data)
    except Exception:
        pass

    # parse data with the csv parser
    try:
        data = data.splitlines()  # used later
        handler = csv.reader(data, dialect)
    except Exception, e:
        raise MetaParsingException("Can't parse your CSV data: %s" % e.message)

    # make sure that the data are meaningful
    decoded = []
    for cnt, line in enumerate(handler):
        usable_data = filter(lambda x: x.strip(), line)

        if not usable_data:
            continue

        if len(usable_data) != 2:
            raise MetaParsingException(
                "Bad number of elements - line %d:\n\t%s\n" % (cnt, data[cnt])
            )

        # remove trailing spaces, decode to utf-8
        usable_data = map(lambda x: x.strip().decode("utf-8"), usable_data)

        # remove quotes if csv.Sniffer failed to detect the right `dialect`
        usable_data = map(_remove_quotes, usable_data)

        decoded.append(usable_data)

    # apply further checks to the data
    return validator.check_structure(decoded)
1,113,086
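An illustrative two-column input; the delimiter is left for csv.Sniffer to detect, and validator.check_structure comes from the surrounding package.

csv_data = 'ISBN;978-80-00-00000-0\nTitle;"An Example"\n'
records = decode(csv_data)  # validated records, per validator.check_structure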
Guess strategy type to use for file by extension. Args: file_name_or_ext: Either a file name with an extension or just an extension Returns: Strategy: Type corresponding to extension or None if there's no corresponding strategy type
def guess_strategy_type(file_name_or_ext):
    if '.' not in file_name_or_ext:
        ext = file_name_or_ext
    else:
        # strip the leading dot from the extension
        _, ext = os.path.splitext(file_name_or_ext)
        ext = ext.lstrip('.')

    file_type_map = get_file_type_map()
    return file_type_map.get(ext, None)
1,113,133
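Both call forms from the docstring; whether 'json' or 'yaml' maps to a strategy depends entirely on get_file_type_map(), so the results here are illustrative.

strategy = guess_strategy_type('settings.json')  # full file name
strategy = guess_strategy_type('yaml')           # bare extension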
Search for events with the provided title.

    Args:
        event_title: The title of the event.
        regex: If True, treat event_title as a regular expression.

    Returns:
        An event JSON object returned from the server with the following:

        {
          "meta": {
            "limit": 20,
            "next": null,
            "offset": 0,
            "previous": null,
            "total_count": 3
          },
          "objects": [{}, {}, etc]
        }

        or None if an error occurred.
def get_events(self, event_title, regex=False):
        # the API expects the regex flag as 0 or 1
        regex_val = 1 if regex else 0
        r = requests.get('{0}/events/?api_key={1}&username={2}&c-title='
                         '{3}&regex={4}'.format(self.url, self.api_key,
                                                self.username, event_title,
                                                regex_val),
                         verify=self.verify)
        if r.status_code == 200:
            return json.loads(r.text)
        log.error('Non-200 status code from get_events: '
                  '{}'.format(r.status_code))
        return None
1,113,217
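A hypothetical call; the client class name `CRITsAPI`, the URL and the credentials are placeholders, since this entry shows only the method itself.

client = CRITsAPI(url='https://crits.example.com/api/v1',
                  api_key='0123456789abcdef', username='analyst')
events = client.get_events('Phishing Campaign', regex=False)
if events is not None:
    print('total:', events['meta']['total_count'])
    for event in events['objects']:
        print(event)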