code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def call(self, cmd, **kwargs):
    """Run *cmd* as a subprocess and capture its output.

    :param cmd: command to execute; a string is split on whitespace,
        otherwise it is passed through as an argument list.
    :param kwargs: extra keyword arguments forwarded to subprocess.Popen.
    :returns: tuple of (stdout, stderr, returncode).
    :raises Exception: if the command exits with a non-zero returncode.
    """
    # Accept both str and (on Python 2) unicode command strings; the
    # original referenced `basestring`, which is undefined on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    if isinstance(cmd, string_types):
        cmd = cmd.split()
    self.log.info('Running %s', cmd)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, **kwargs)
    out, err = p.communicate()
    if out:
        self.log.info(out)
    if err:
        # stderr output with a zero returncode is informational only.
        if p.returncode == 0:
            self.log.info(err)
        else:
            self.log.error(err)
    if p.returncode != 0:
        self.log.error('returncode = %d' % p.returncode)
        # Include the failing command and returncode instead of a bare
        # `raise Exception`, so callers can see what actually failed.
        raise Exception('Command %s failed with returncode %d'
                        % (cmd, p.returncode))
    return out, err, p.returncode
A simple subprocess wrapper
def merge(self, other):
    """Combine this range with *other* into a single spanning range.

    The two ranges do not need to overlap or touch each other.

    :returns: a new Range covering the smallest interval that contains
        both input ranges.
    """
    lo = min(self._start, other.start)
    hi = max(self._end, other.end)
    return Range(lo, hi)
Merge this range object with another (ranges need not overlap or abut). :returns: a new Range object representing the interval containing both ranges.
def intersect(self, other):
    """Compute the interval of overlap between this range and *other*.

    :returns: a new Range for the shared interval, or None when the two
        ranges do not overlap.
    """
    if not self.overlap(other):
        return None
    lo = max(self._start, other.start)
    hi = min(self._end, other.end)
    return Range(lo, hi)
Determine the interval of overlap between this range and another. :returns: a new Range object representing the overlapping interval, or `None` if the ranges do not overlap.
def overlap(self, other):
    """Report whether this range shares any interval with *other*."""
    return self._start < other.end and self._end > other.start
Determine whether this range overlaps with another.
def contains(self, other):
    """Report whether *other* lies entirely within this range."""
    starts_before = self._start <= other.start
    ends_after = self._end >= other.end
    return starts_before and ends_after
Determine whether this range contains another.
def transform(self, offset):
    """Shift this range in place by *offset*.

    The shifted range must remain a valid interval (start > 0).
    """
    new_start = self._start + offset
    new_end = self._end + offset
    assert new_start > 0, \
        ('offset {} invalid; resulting range [{}, {}) is '
         'undefined'.format(offset, new_start, new_end))
    self._start = new_start
    self._end = new_end
Shift this range by the specified offset. Note: the resulting range must be a valid interval.
def runningMedian(seq, M):
    """Compute the median of each length-M sliding window over *seq*.

    seq -- iterable of numbers to scan
    M   -- window size (integer > 1)

    Returns a list of N - M + 1 medians, where N is the length of seq.
    For even M the two central values of the sorted window are averaged.
    """
    it = iter(seq)
    half = M // 2  # center index of the sorted window (truncated division)

    # `window` keeps items in arrival order; `ordered` keeps the same
    # items sorted so the median can be read from the middle.
    window = deque(islice(it, M))
    ordered = sorted(window)

    def current_median():
        if M & 1:
            return ordered[half]
        return (ordered[half - 1] + ordered[half]) * 0.5

    medians = [current_median()]

    # Slide the window one item at a time: drop the oldest value and
    # insert the newcomer so `ordered` never needs a full re-sort.
    for item in it:
        oldest = window.popleft()
        window.append(item)
        del ordered[bisect_left(ordered, oldest)]
        insort(ordered, item)
        medians.append(current_median())
    return medians
Purpose: Find the median for the points in a sliding window (odd number in size) as it is moved from left to right by one point at a time. Inputs: seq -- list containing items for which a running median (in a sliding window) is to be calculated M -- number of items in window (window size) -- must be an integer > 1 Outputs: medians -- list of medians with size N - M + 1 Note: 1. The median of a finite list of numbers is the "center" value when this list is sorted in ascending order. 2. If M is an even number the two elements in the window that are closest to the center are averaged to give the median (this is a convention rather than the strict definition)
def runningMean(seq, N, M):
    """Compute the mean of each length-M sliding window over *seq*.

    seq -- sequence of numbers (N items)
    N   -- length of seq
    M   -- sliding-window size

    Returns a list of N - M + 1 window means.
    """
    window = deque(seq[0:M])
    means = [sum(window) / len(window)]  # mean of the first window
    # Advance one position per iteration until the window has consumed
    # the last item of seq.
    for item in islice(seq, M, N):
        window.popleft()
        window.append(item)
        means.append(sum(window) / len(window))
    return means
Purpose: Find the mean for the points in a sliding window (fixed size) as it is moved from left to right by one point at a time. Inputs: seq -- list containing items for which a mean (in a sliding window) is to be calculated (N items) N -- length of sequence M -- number of items in sliding window Outputs: means -- list of means with size N - M + 1
def behave(cmdline, cwd=".", **kwargs):
    """Run behave as a subprocess command.

    :returns: process/shell instance with results (collected output and
        returncode).
    """
    assert isinstance(cmdline, six.string_types)
    command = "behave " + cmdline
    return run(command, cwd=cwd, **kwargs)
Run behave as subprocess command and return process/shell instance with results (collected output, returncode).
def get_field_template(self, bound_field, template_name=None):
    """Use a special field template for widgets with multiple inputs.

    Only applies when no template other than the default one has been
    chosen for the field.
    """
    template_name = super().get_field_template(bound_field, template_name)
    multi_input_widgets = (forms.RadioSelect, forms.CheckboxSelectMultiple)
    is_default = template_name == self.field_template
    if is_default and isinstance(bound_field.field.widget, multi_input_widgets):
        return 'tapeforms/fields/foundation_fieldset.html'
    return template_name
Uses a special field template for widget with multiple inputs. It only applies if no other template than the default one has been defined.
def printer(self):
    """Print this PDA state's attributes (id, type, symbol, transitions)."""
    # print() calls work on both Python 2 and 3; the original used
    # Python-2-only print statements.
    print(" ID " + repr(self.id))
    if self.type == 0:
        print(" Tag: - ")
        print(" Start State - ")
    elif self.type == 1:
        print(" Push " + repr(self.sym))
    elif self.type == 2:
        print(" Pop State " + repr(self.sym))
    elif self.type == 3:
        print(" Read State " + repr(self.sym))
    elif self.type == 4:
        print(" Stop State " + repr(self.sym))
    for j in self.trans:
        # The original `> 1 or == 1` check just means "non-empty".
        if len(self.trans[j]) >= 1:
            for symbol in self.trans[j]:
                print(" On Symbol " + repr(symbol)
                      + " Transition To State " + repr(j))
Prints PDA state attributes
def printer(self):
    """Print every PDA state (indices 0 through self.n) and its attributes."""
    # print() calls work on both Python 2 and 3; the original used a
    # Python-2-only print statement and a manual while-loop counter.
    for i in range(self.n + 1):
        print("--------- State No --------" + repr(i))
        self.s[i].printer()
Prints PDA states and their attributes
def consume_input(self, mystr, stack=None, state=1, curchar=0, depth=0):
    """Consume an input string and check whether the PDA accepts it.

    Args:
        mystr (str): the input string to be consumed (space separated)
        stack (list): the stack of symbols (a fresh list by default)
        state (int): the current state of the PDA
        curchar (int): the index of the consumed character
        depth (int): the depth of the function call in the stack

    Returns:
        bool: True if the input is accepted, False otherwise
    """
    # The original used a mutable default argument (stack=[]), which is
    # shared across calls and corrupts later invocations; create a fresh
    # stack per top-level call instead.
    if stack is None:
        stack = []
    mystrsplit = mystr.split(' ')
    if self.s[state].type == 1:
        # Push state: push the symbol and follow the single transition.
        stack.append(self.s[state].sym)
        if len(self.s[state].trans) > 0:
            state = self.s[state].trans[0]
            if self.parse(
                    mystr, stack=stack, state=state,
                    curchar=curchar, depth=depth + 1) == 1:
                return True
        return False
    if self.s[state].type == 2:
        # Pop state: the popped symbol selects the next state.
        if len(stack) == 0:
            return False
        sym = stack.pop()
        for key in self.s[state].trans:
            if sym in self.s[state].trans[key]:
                if self.parse(
                        mystr, stack=stack, state=key,
                        curchar=curchar, depth=depth + 1) == 1:
                    return True
        return False
    if self.s[state].type == 3:
        # Read state: consume the current input token and recurse.
        for key in self.s[state].trans:
            if mystrsplit[curchar] in self.s[state].trans[key]:
                if curchar + 1 == len(mystrsplit) \
                        and 'closing' in self.s[key].trans:
                    return True
                elif curchar + 1 == len(mystrsplit):
                    return False
                if self.parse(
                        mystr, stack=stack, state=key,
                        curchar=curchar + 1, depth=depth + 1) == 1:
                    return True
        return False
Consumes an input and validates if it is accepted Args: mystr (str): the input string to be consumed stack (list): the stack of symbols state (int): the current state of the PDA curchar (int): the index of the consumed character depth (int): the depth of the function call in the stack Returns: bool: A value indicating the correct or erroneous execution
def _CreateDatabase(self):
    """Create all database tables."""
    goodlogging.Log.Info("DB", "Initialising new database", verbosity=self.logVerbosity)

    with sqlite3.connect(self._dbPath) as db:
        schema = (
            # Configuration tables
            "CREATE TABLE Config ("
                "Name TEXT UNIQUE NOT NULL, "
                "Value TEXT)",
            "CREATE TABLE IgnoredDir ("
                "DirName TEXT UNIQUE NOT NULL)",
            "CREATE TABLE SupportedFormat ("
                "FileFormat TEXT UNIQUE NOT NULL)",
            # Look-up tables
            "CREATE TABLE TVLibrary ("
                "ShowID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, "
                "ShowName TEXT UNIQUE NOT NULL, "
                "ShowDir TEXT UNIQUE)",
            "CREATE TABLE FileName ("
                "FileName TEXT UNIQUE NOT NULL, "
                "ShowID INTEGER, "
                "FOREIGN KEY (ShowID) REFERENCES ShowName(ShowID))",
            "CREATE TABLE SeasonDir ("
                "ShowID INTEGER, "
                "Season INTEGER NOT NULL, "
                "SeasonDir TEXT NOT NULL, "
                "FOREIGN KEY (ShowID) REFERENCES ShowName(ShowID),"
                "CONSTRAINT SeasonDirPK PRIMARY KEY (ShowID,Season))",
        )
        for statement in schema:
            db.execute(statement)
        db.commit()

    goodlogging.Log.Info("DB", "Database initialisation complete", verbosity=self.logVerbosity)
Create all database tables.
def _ActionDatabase(self, cmd, args = None, commit = True, error = True):
    """Execute a single SQL command against the database.

    Parameters
    ----------
    cmd : string
        SQL command.
    args : tuple [optional : default = None]
        Arguments to be passed along with the SQL command, e.g.
        cmd="SELECT Value FROM Config WHERE Name=?" args=(fieldName, )
    commit : boolean [optional : default = True]
        If True commit database changes after the command is executed.
    error : boolean [optional : default = True]
        If False any sqlite3.OperationalError makes this function return
        None; otherwise the exception is raised.

    Returns
    ----------
    The fetched result rows for a valid command, or None when an
    operational error occurred and error is False.
    """
    goodlogging.Log.Info("DB", "Database Command: {0} {1}".format(cmd, args), verbosity=self.logVerbosity)
    with sqlite3.connect(self._dbPath) as db:
        try:
            result = db.execute(cmd) if args is None else db.execute(cmd, args)
        except sqlite3.OperationalError:
            if error is True:
                raise
            return None
        else:
            if commit is True:
                db.commit()
            return result.fetchall()
Do action on database. Parameters ---------- cmd : string SQL command. args : tuple [optional : default = None] Arguments to be passed along with the SQL command. e.g. cmd="SELECT Value FROM Config WHERE Name=?" args=(fieldName, ) commit : boolean [optional : default = True] If true commit database changes after command is executed. error : boolean [optional : default = True] If False then any sqlite3.OperationalError exceptions will cause this function to return None, otherwise the exception will be raised. Returns ---------- If a valid result is obtained from the database this will be returned. If an error occurs and the error argument is set to False then the return value will be None.
def _PurgeTable(self, tableName):
    """Delete every row from the given table without dropping it.

    Parameters
    ----------
    tableName : string
        Name of table.
    """
    logMessage = "Deleting all entries from table {0}".format(tableName)
    goodlogging.Log.Info("DB", logMessage, verbosity=self.logVerbosity)
    self._ActionDatabase("DELETE FROM {0}".format(tableName))
Deletes all rows from given table without dropping table. Parameters ---------- tableName : string Name of table.
def GetConfigValue(self, fieldName):
    """Look up fieldName in the Config table and return its value.

    Parameters
    ----------
    fieldName : string
        String matching the Name column in the Config table.

    Returns
    ----------
    string or None
        The matching Value column entry if one match is found, otherwise
        None. Multiple matches trigger a fatal error.
    """
    result = self._ActionDatabase("SELECT Value FROM Config WHERE Name=?", (fieldName, ))

    if not result:
        # Covers both a None result and an empty row list.
        return None

    if len(result) == 1:
        goodlogging.Log.Info("DB", "Found database match in config table {0}={1}".format(fieldName, result[0][0]), verbosity=self.logVerbosity)
        return result[0][0]

    goodlogging.Log.Fatal("DB", "Database corrupted - multiple matches found in config table {0}={1}".format(fieldName, result))
Match given field name in Config table and return corresponding value. Parameters ---------- fieldName : string String matching Name column in Config table. Returns ---------- string or None If a match is found the corresponding entry in the Value column of the database table is returned, otherwise None is returned (or if multiple matches are found a fatal error is raised).
def SetConfigValue(self, fieldName, value):
    """Insert or update a Config table entry.

    If an entry for fieldName already exists it is updated with the new
    value, otherwise a new entry is added.

    Parameters
    ----------
    fieldName : string
        Inserted or matched against the Name column of the Config table.
    value : string
        Inserted or updated in the Value column of the Config table.
    """
    existingValue = self.GetConfigValue(fieldName)
    if existingValue is None:
        goodlogging.Log.Info("DB", "Adding {0}={1} to database config table".format(fieldName, value), verbosity=self.logVerbosity)
        self._ActionDatabase("INSERT INTO Config VALUES (?,?)", (fieldName, value))
    else:
        goodlogging.Log.Info("DB", "Updating {0} in database config table from {1} to {2}".format(fieldName, existingValue, value), verbosity=self.logVerbosity)
        self._ActionDatabase("UPDATE Config SET Value=? WHERE Name=?", (value, fieldName))
Set value in Config table. If a entry already exists this is updated with the new value, otherwise a new entry is added. Parameters ---------- fieldName : string String to be inserted or matched against Name column in Config table. value : string Entry to be inserted or updated in Value column of Config table.
def _AddToSingleColumnTable(self, tableName, columnHeading, newValue):
    """Add an entry to a table containing a single column.

    Existing table entries are checked so a duplicate value is never
    inserted twice.

    Parameters
    ----------
    tableName : string
        Name of table to add entry to.
    columnHeading : string
        Name of column heading (kept for interface compatibility; the
        insert itself does not need it).
    newValue : string
        New value to add to the table.
    """
    currentTable = self._GetFromSingleColumnTable(tableName)
    # A direct membership test replaces the original manual match loop.
    if currentTable is not None and newValue in currentTable:
        goodlogging.Log.Info("DB", "{0} already exists in {1} table".format(newValue, tableName), verbosity=self.logVerbosity)
    else:
        goodlogging.Log.Info("DB", "Adding {0} to {1} table".format(newValue, tableName), verbosity=self.logVerbosity)
        self._ActionDatabase("INSERT INTO {0} VALUES (?)".format(tableName), (newValue, ))

############################################################################
# _GetFromSingleColumnTable
############################################################################
"""
Get all entries from a table containing a single column.

Parameters
----------
tableName : string
    Name of table to add entry to.

Returns
----------
list or None
    If either no table or no rows are found this returns None, otherwise a
    list of all table entries is returned.
"""
Add an entry to a table containing a single column. Checks existing table entries to avoid duplicate entries if the given value already exists in the table. Parameters ---------- tableName : string Name of table to add entry to. columnHeading : string Name of column heading. newValue : string New value to add to table.
def AddShowToTVLibrary(self, showName):
    """Add a new show to the TVLibrary table.

    A fatal error is raised if the show already exists in the table.

    Parameters
    ----------
    showName : string
        Show name to add to TV library table.

    Returns
    ----------
    int
        Unique show id generated for the show when it is added; used
        across the database to reference this show.
    """
    goodlogging.Log.Info("DB", "Adding {0} to TV library".format(showName), verbosity=self.logVerbosity)

    currentShowValues = self.SearchTVLibrary(showName = showName)
    if currentShowValues is None:
        self._ActionDatabase("INSERT INTO TVLibrary (ShowName) VALUES (?)", (showName, ))
        rows = self._ActionDatabase("SELECT (ShowID) FROM TVLibrary WHERE ShowName=?", (showName, ))
        return rows[0][0]
    else:
        goodlogging.Log.Fatal("DB", "An entry for {0} already exists in the TV library".format(showName))
Add show to TVLibrary table. If the show already exists in the table a fatal error is raised. Parameters ---------- showName : string Show name to add to TV library table. Returns ---------- int Unique show id generated for show when it is added to the table. Used across the database to reference this show.
def UpdateShowDirInTVLibrary(self, showID, showDir):
    """Update the show directory entry for the given show id in TVLibrary.

    Parameters
    ----------
    showID : int
        Show id value.
    showDir : string
        Show directory name.
    """
    # Pass verbosity like every other logging call in this class does;
    # the original omitted it for this one call only.
    goodlogging.Log.Info("DB", "Updating TV library for ShowID={0}: ShowDir={1}".format(showID, showDir), verbosity=self.logVerbosity)
    self._ActionDatabase("UPDATE TVLibrary SET ShowDir=? WHERE ShowID=?", (showDir, showID))
Update show directory entry for given show id in TVLibrary table. Parameters ---------- showID : int Show id value. showDir : string Show directory name.
def SearchFileNameTable(self, fileName):
    """Find the show id for a given file name in the FileName table.

    Parameters
    ----------
    fileName : string
        File name to look up in table.

    Returns
    ----------
    int or None
        The show id for a single matching entry, otherwise None.
        Multiple matches trigger a fatal error.
    """
    goodlogging.Log.Info("DB", "Looking up filename string '{0}' in database".format(fileName), verbosity=self.logVerbosity)

    result = self._ActionDatabase("SELECT ShowID FROM FileName WHERE FileName=?", (fileName, ), error = False)

    if result is None:
        goodlogging.Log.Info("DB", "No match found in database for '{0}'".format(fileName), verbosity=self.logVerbosity)
        return None
    if len(result) == 0:
        return None
    if len(result) == 1:
        goodlogging.Log.Info("DB", "Found file name match: {0}".format(result), verbosity=self.logVerbosity)
        return result[0][0]
    goodlogging.Log.Fatal("DB", "Database corrupted - multiple matches found in database table for: {0}".format(result))
Search FileName table. Find the show id for a given file name. Parameters ---------- fileName : string File name to look up in table. Returns ---------- int or None If a match is found in the database table the show id for this entry is returned, otherwise this returns None.
def AddToFileNameTable(self, fileName, showID):
    """Add a file name / show id pair to the FileName table.

    A fatal error is raised if an entry for the file name already exists.

    Parameters
    ----------
    fileName : string
        File name.
    showID : int
        Show id.
    """
    goodlogging.Log.Info("DB", "Adding filename string match '{0}'={1} to database".format(fileName, showID), verbosity=self.logVerbosity)

    existingShowID = self.SearchFileNameTable(fileName)
    if existingShowID is not None:
        goodlogging.Log.Fatal("DB", "An entry for '{0}' already exists in the FileName table".format(fileName))
    else:
        self._ActionDatabase("INSERT INTO FileName (FileName, ShowID) VALUES (?,?)", (fileName, showID))
Add entry to FileName table. If the file name and show id combination already exists in the table a fatal error is raised. Parameters ---------- fileName : string File name. showID : int Show id.
def SearchSeasonDirTable(self, showID, seasonNum):
    """Find the season directory for a show id / season combination.

    Parameters
    ----------
    showID : int
        Show id for given show.
    seasonNum : int
        Season number.

    Returns
    ----------
    string or None
        The season directory name for a single match, or None when no
        match is found. Multiple matches trigger a fatal error.
    """
    goodlogging.Log.Info("DB", "Looking up directory for ShowID={0} Season={1} in database".format(showID, seasonNum), verbosity=self.logVerbosity)

    result = self._ActionDatabase("SELECT SeasonDir FROM SeasonDir WHERE ShowID=? AND Season=?", (showID, seasonNum), error = False)

    if result is None:
        goodlogging.Log.Info("DB", "No match found in database", verbosity=self.logVerbosity)
        return None
    if len(result) == 0:
        return None
    if len(result) == 1:
        goodlogging.Log.Info("DB", "Found database match: {0}".format(result), verbosity=self.logVerbosity)
        return result[0][0]
    goodlogging.Log.Fatal("DB", "Database corrupted - multiple matches found in database table for: {0}".format(result))
Search SeasonDir table. Find the season directory for a given show id and season combination. Parameters ---------- showID : int Show id for given show. seasonNum : int Season number. Returns ---------- string or None If no match is found this returns None, if a single match is found then the season directory name value is returned. If multiple matches are found a fatal error is raised.
def AddSeasonDirTable(self, showID, seasonNum, seasonDir):
    """Add a season directory entry to the SeasonDir table.

    A fatal error is raised when a different directory already exists
    for the same show id and season number combination.

    Parameters
    ----------
    showID : int
        Show id.
    seasonNum : int
        Season number.
    seasonDir : string
        Season directory name.
    """
    goodlogging.Log.Info("DB", "Adding season directory ({0}) to database for ShowID={1}, Season={2}".format(seasonDir, showID, seasonNum), verbosity=self.logVerbosity)

    existingDir = self.SearchSeasonDirTable(showID, seasonNum)
    if existingDir is None:
        self._ActionDatabase("INSERT INTO SeasonDir (ShowID, Season, SeasonDir) VALUES (?,?,?)", (showID, seasonNum, seasonDir))
    elif existingDir == seasonDir:
        goodlogging.Log.Info("DB", "A matching entry already exists in the SeasonDir table", verbosity=self.logVerbosity)
    else:
        goodlogging.Log.Fatal("DB", "A different entry already exists in the SeasonDir table")
Add entry to SeasonDir table. If a different entry for season directory is found for the given show id and season number combination this raises a fatal error. Parameters ---------- showID : int Show id. seasonNum : int Season number. seasonDir : string Season directory name.
def PrintAllTables(self):
    """Print the contents of every database table."""
    goodlogging.Log.Info("DB", "Database contents:\n")
    # Iterating the dict directly yields its keys; .keys() is redundant.
    for table in self._tableDict:
        self._PrintDatabaseTable(table)
Prints contents of every table.
def _get_minidom_tag_value(station, tag_name): tag = station.getElementsByTagName(tag_name)[0].firstChild if tag: return tag.nodeValue return None
get a value from a tag (if it exists)
def _parse(data, obj_name, attr_map):
    """Parse XML *data* into a list of dicts, one per *obj_name* element.

    attr_map maps python-side key names to XML tag names.
    """
    document = minidom.parseString(data)
    results = []
    for element in document.getElementsByTagName(obj_name):
        record = {
            py_name: _get_minidom_tag_value(element, xml_name)
            for (py_name, xml_name) in attr_map.items()
        }
        results.append(record)
    return results
parse xml data into a python map
def get_all_stations(self, station_type=None):
    """Return information for all stations.

    @param<optional> station_type: one of ['mainline', 'suburban', 'dart']
    """
    params = None
    if station_type and station_type in STATION_TYPE_TO_CODE_DICT:
        url = self.api_base_url + 'getAllStationsXML_WithStationType'
        params = {'stationType': STATION_TYPE_TO_CODE_DICT[station_type]}
    else:
        url = self.api_base_url + 'getAllStationsXML'

    response = requests.get(url, params=params, timeout=10)
    if response.status_code != 200:
        return []
    return self._parse_station_list(response.content)
Returns information of all stations. @param<optional> station_type: ['mainline', 'suburban', 'dart']
def get_all_current_trains(self, train_type=None, direction=None):
    """Return all trains that are due to start in the next 10 minutes.

    @param train_type: one of ['mainline', 'suburban', 'dart']
    @param direction: optionally filter the result by train direction
    """
    params = None
    # Guard the dict lookup the same way get_all_stations does, so an
    # unknown train_type falls back to the unfiltered query instead of
    # raising KeyError.
    if train_type and train_type in STATION_TYPE_TO_CODE_DICT:
        url = self.api_base_url + 'getCurrentTrainsXML_WithTrainType'
        params = {'TrainType': STATION_TYPE_TO_CODE_DICT[train_type]}
    else:
        url = self.api_base_url + 'getCurrentTrainsXML'

    response = requests.get(url, params=params, timeout=10)
    if response.status_code != 200:
        return []

    trains = self._parse_all_train_data(response.content)
    if direction is not None:
        return self._prune_trains(trains, direction=direction)
    return trains
Returns all trains that are due to start in the next 10 minutes @param train_type: ['mainline', 'suburban', 'dart']
def get_station_by_name(self, station_name, num_minutes=None, direction=None, destination=None, stops_at=None):
    """Return all trains due to serve the station *station_name*.

    @param station_name name of the station
    @param num_minutes only trains within this time (between 5 and 90)
    @param direction filter by direction (Northbound or Southbound)
    @param destination filter by name of the destination station
    @param stops_at filter by name of one of the stops
    """
    url = self.api_base_url + 'getStationDataByNameXML'
    params = {'StationDesc': station_name}
    if num_minutes:
        url = url + '_withNumMins'
        params['NumMins'] = num_minutes

    response = requests.get(url, params=params, timeout=10)
    if response.status_code != 200:
        return []

    trains = self._parse_station_data(response.content)
    if direction is None and destination is None:
        return trains
    return self._prune_trains(trains, direction=direction,
                              destination=destination, stops_at=stops_at)
Returns all trains due to serve station `station_name`. @param station_name @param num_minutes. Only trains within this time. Between 5 and 90 @param direction Filter by direction. Northbound or Southbound @param destination Filter by name of the destination stations @param stops_at Filter by name of one of the stops
def _prune_trains(self, trains, direction=None, destination=None, stops_at=None): pruned_data = [] for train in trains: append = True if direction is not None and train["direction"] != direction: append = False if destination is not None and train["destination"] != destination: append = False if append and stops_at is not None: if stops_at != train['destination']: stops = self.get_train_stops(train["code"]) for stop in stops: append = False if stop["location"] == stops_at: append = True break if append: pruned_data.append(train) return pruned_data
Only return the data matching direction and / or destination. If stops_at is set this may do a number of extra HTTP requests @param trains list of trains to filter @param direction Filter by train direction. Northbound or Southbound @param destination Filter by name of the destination stations @param stops_at Filter by name of one of the stops
def get_train_stops(self, train_code, date=None):
    """Get movement details for a single train.

    @param train_code code for the train
    @param date date in format "15 oct 2017"; if None, today is used
    """
    if date is None:
        date = datetime.date.today().strftime("%d %B %Y")

    url = self.api_base_url + 'getTrainMovementsXML'
    params = {'TrainId': train_code, 'TrainDate': date}

    response = requests.get(url, params=params, timeout=10)
    if response.status_code != 200:
        return []
    return self._parse_train_movement_data(response.content)
Get details for a train. @param train_code code for the train @param date Date in format "15 oct 2017". If none use today
def fill_fields(self, **kwargs):
    """Fill each page field named by a kwarg key with its value."""
    for field_name, text in kwargs.items():
        page_element = getattr(self, field_name)
        page_element.send_keys(text)
Fills the fields referenced by kwargs keys and fill them with the value
def selector(self, fieldname):
    """Return the (by, selector) tuple for the given page element."""
    element_finder = self._finders[fieldname]
    return (element_finder._by, element_finder._selector)
Gets a selector for the given page element as a tuple (by, selector)
def authorize_url(client_id=None, redirect_uri=None, state=None, scopes=None,
                  show_dialog=False, http_client=None):
    """Build the URL that triggers the authorization dialog.

    :param str client_id: Client ID (falls back to SPOTIFY_CLIENT_ID)
    :param str redirect_uri: Application Redirect URI (falls back to
        SPOTIFY_REDIRECT_URI)
    :param str state: Application State (random token by default)
    :param List[str] scopes: Scopes to request
    :param bool show_dialog: Show the dialog
    :param http_client: HTTP Client for requests (unused here; kept for
        interface compatibility)
    :return: Authorize URL
    :rtype: str
    """
    from urllib.parse import urlencode  # local import: stdlib only

    params = {
        'client_id': client_id or os.environ.get('SPOTIFY_CLIENT_ID'),
        'redirect_uri': redirect_uri or os.environ.get('SPOTIFY_REDIRECT_URI'),
        'state': state or str(uuid.uuid4()).replace('-', ''),
        'scope': ' '.join(scopes) if scopes else '',
        'show_dialog': show_dialog,
        'response_type': 'code'
    }
    # urlencode escapes spaces in the scope value and other reserved
    # characters; the original raw string join produced an invalid URL
    # whenever more than one scope was requested.
    return '{}?{}'.format('https://accounts.spotify.com/authorize',
                          urlencode(params))
Trigger authorization dialog :param str client_id: Client ID :param str redirect_uri: Application Redirect URI :param str state: Application State :param List[str] scopes: Scopes to request :param bool show_dialog: Show the dialog :param http_client: HTTP Client for requests :return str Authorize URL :rtype str
def refresh(self):
    """Exchange the stored refresh token for a new access token."""
    payload = {
        'grant_type': 'refresh_token',
        'refresh_token': self._token.refresh_token
    }
    response = self.http_client.post(self.URL, data=payload,
                                     auth=(self.client_id, self.client_secret))
    response.raise_for_status()
    self._token = Token.from_json(response.json())
Refresh the access token
def instance_of(cls):
    """Create an invariant requiring the value is an instance of ``cls``."""
    def check(value):
        message = u"{value!r} is instance of {actual!s}, required {required!s}".format(
            value=value,
            actual=fullyQualifiedName(type(value)),
            required=fullyQualifiedName(cls),
        )
        return (isinstance(value, cls), message)
    return check
Create an invariant requiring the value is an instance of ``cls``.
def provider_of(iface):
    """Create an invariant requiring the value provides the
    zope.interface ``iface``."""
    def check(value):
        message = u"{value!r} does not provide {interface!s}".format(
            value=value,
            interface=fullyQualifiedName(iface),
        )
        return (iface.providedBy(value), message)
    return check
Create an invariant requiring the value provides the zope.interface ``iface``.
def temp_dir(suffix='', prefix='tmp', parent_dir=None, make_cwd=False):
    """Create a temporary directory, yield it, and delete it on exit.

    When make_cwd is True the current working directory is switched to
    the temporary directory for the duration of the context and
    restored afterwards. suffix, prefix and parent_dir are passed to
    ``tempfile.mkdtemp()``.

    NOTE(review): this generator is presumably wrapped by
    contextlib.contextmanager at its definition site — confirm.
    """
    saved_cwd = os.getcwd()
    parent_dir = parent_dir if parent_dir is None else str(parent_dir)
    created = tempfile.mkdtemp(suffix, prefix, parent_dir)
    created_path = pathlib.Path(created)
    try:
        if make_cwd:
            os.chdir(str(created))
        yield created_path.resolve()
    finally:
        # Restore the working directory before removing the tree so the
        # deletion cannot fail because we are still inside it.
        if make_cwd:
            os.chdir(saved_cwd)
        with temporary.util.allow_missing_file():
            shutil.rmtree(str(created))
Create a temporary directory and optionally change the current working directory to it. The directory is deleted when the context exits. The temporary directory is created when entering the context manager, and deleted when exiting it: >>> import temporary >>> with temporary.temp_dir() as temp_dir: ... assert temp_dir.is_dir() >>> assert not temp_dir.exists() This time let's make the temporary directory our working directory: >>> import os >>> with temporary.temp_dir(make_cwd=True) as temp_dir: ... assert str(temp_dir) == os.getcwd() >>> assert not str(temp_dir) == os.getcwd() The suffix, prefix, and parent_dir options are passed to the standard ``tempfile.mkdtemp()`` function: >>> with temporary.temp_dir() as p: ... with temporary.temp_dir(suffix='suf', prefix='pre', parent_dir=p) as d: ... assert d.parent == p ... assert d.name.startswith('pre') ... assert d.name.endswith('suf') This function can also be used as a decorator, with the in_temp_dir alias: >>> @temporary.in_temp_dir() ... def my_function(): ... assert old_cwd != os.getcwd() ... >>> old_cwd = os.getcwd() >>> my_function() >>> assert old_cwd == os.getcwd()
def openSafeReplace(filepath, mode='w+b'):
    """Yield a temporary file that replaces *filepath* on successful exit.

    NOTE(review): this generator is presumably wrapped by
    contextlib.contextmanager at its definition site — confirm.
    """
    tempfileName = None
    # Fail early if the target cannot be written, before creating the
    # temporary file.
    if not _isFileAccessible(filepath):
        raise IOError('File %s is not writtable' % (filepath, ))
    with tempfile.NamedTemporaryFile(delete=False, mode=mode) as tmpf:
        tempfileName = tmpf.name
        yield tmpf
    # Re-check writability before moving the temporary file into place.
    if not _isFileAccessible(filepath):
        raise IOError('File %s is not writtable' % (filepath, ))
    # Note: unhandled exceptions may still occur here because of race
    # conditions, messing things up.
    shutil.move(tempfileName, filepath)
Context manager to open a temporary file and replace the original file on closing.
def _isFileAccessible(filepath): directory = os.path.dirname(filepath) if not os.access(directory, os.W_OK): #Return False if directory does not exist or is not writable return False if os.path.exists(filepath): if not os.access(filepath, os.W_OK): #Return False if file is not writable return False try: openfile = os.open(filepath, os.O_WRONLY) os.close(openfile) except IOError: #Return False if file is locked return False #Return True if file is writtable return True
Returns True if the specified filepath is writable.
def writeJsonZipfile(filelike, data, compress=True, mode='w', name='data'):
    """Serialize *data* to a JSON formated string and write it to a zipfile.

    :param filelike: path to a file (str) or a file-like object
    :param data: object converted to a JSON formated string; objects and
        types in data must be supported by json.JSONEncoder or define the
        method ``._reprJSON()``
    :param compress: bool, True to use zip file compression
    :param mode: 'w' to truncate and write a new file, or 'a' to append
        to an existing file
    :param name: the file name given to the JSON output inside the archive
    """
    compressionType = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    jsonText = json.dumps(data, cls=MaspyJsonEncoder)
    with zipfile.ZipFile(filelike, mode, allowZip64=True) as archive:
        archive.writestr(name, jsonText, compressionType)
Serializes the objects contained in data to a JSON formated string and writes it to a zipfile. :param filelike: path to a file (str) or a file-like object :param data: object that should be converted to a JSON formated string. Objects and types in data must be supported by the json.JSONEncoder or have the method ``._reprJSON()`` defined. :param compress: bool, True to use zip file compression :param mode: 'w' to truncate and write a new file, or 'a' to append to an existing file :param name: the file name that will be given to the JSON output in the archive
def writeBinaryItemContainer(filelike, binaryItemContainer, compress=True):
    """Serialize the binaryItems of *binaryItemContainer* into a zipfile.

    The archive contains a ``metadata`` file (the JSON representation of
    each item plus the information needed to restore its numpy arrays)
    and a ``binarydata`` file (the serialized arrays). Use
    :func:`loadBinaryItemContainer()` to restore a container from such a
    zipfile.

    :param filelike: path to a file (str) or a file-like object
    :param binaryItemContainer: a dictionary containing binaryItems
    :param compress: bool, True to use zip file compression
    """
    allMetadata = dict()
    arrayBuffer = io.BytesIO()
    # Note: it would be possible to sort the items here.
    for index, binaryItem in enumerate(viewvalues(binaryItemContainer)):
        arrayMetadata = _dumpArrayDictToFile(arrayBuffer, binaryItem.arrays)
        allMetadata[index] = [binaryItem._reprJSON(), arrayMetadata]

    # TODO: is this seek still necessary?
    arrayBuffer.seek(0)
    compressionType = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    with zipfile.ZipFile(filelike, 'w', allowZip64=True) as archive:
        archive.writestr('metadata',
                         json.dumps(allMetadata, cls=MaspyJsonEncoder),
                         compressionType)
        archive.writestr('binarydata', arrayBuffer.getvalue(), compressionType)
Serializes the binaryItems contained in binaryItemContainer and writes them into a zipfile archive. Examples of binaryItem classes are :class:`maspy.core.Ci` and :class:`maspy.core.Sai`. A binaryItem class has to define the function ``_reprJSON()`` which returns a JSON formated string representation of the class instance. In addition it has to contain an attribute ``.arrays``, a dictionary which values are ``numpy.array``, that are serialized to bytes and written to the ``binarydata`` file of the zip archive. See :func:`_dumpArrayDictToFile()` The JSON formated string representation of the binaryItems, together with the metadata, necessary to restore serialized numpy arrays, is written to the ``metadata`` file of the archive in this form: ``[[serialized binaryItem, [metadata of a numpy array, ...]], ...]`` Use the method :func:`loadBinaryItemContainer()` to restore a binaryItemContainer from a zipfile. :param filelike: path to a file (str) or a file-like object :param binaryItemContainer: a dictionary containing binaryItems :param compress: bool, True to use zip file compression
def _dumpArrayDictToFile(filelike, arrayDict):
    """Serialize each ``numpy.array`` in *arrayDict* and write it to a file.

    :param filelike: a file or file-like object providing ``.write()``
        and ``.tell()``
    :param arrayDict: a dictionary whose values are ``numpy.array``
    :returns: a list of metadata dictionaries; each contains the
        information needed to restore its array from the file plus the
        originating dictionary key as 'arrayKey'
    """
    metadataList = list()
    for arrayKey in sorted(arrayDict):
        array = arrayDict[arrayKey]
        # 1-D and N-D arrays are written by separate helpers.
        dumper = _dumpArrayToFile if array.ndim == 1 else _dumpNdarrayToFile
        metadata = dumper(filelike, array)
        metadata['arrayKey'] = arrayKey
        metadataList.append(metadata)
    return metadataList
Function to serialize and write ``numpy.array`` contained in a dictionary to a file. See also :func:`_dumpArrayToFile` and :func:`_dumpNdarrayToFile`. :param filelike: can be a file or a file-like object that provides the methods ``.write()`` and ``.tell()``. :param arrayDict: a dictionary which values are ``numpy.array``, that are serialized to bytes and written to the filelike. :returns: a list of metadata dictionaries a metadata dictionary contains information necessary to restore the ``numpy.arrays`` from the file and the corresponding key from the arrayDict as 'arrayKey'.
def _dumpArrayToFile(filelike, array): bytedata = array.tobytes('C') start = filelike.tell() end = start + len(bytedata) metadata = {'start': start, 'end': end, 'size': array.size, 'dtype': array.dtype.name } filelike.write(bytedata) return metadata
Serializes a 1-dimensional ``numpy.array`` to bytes, writes the bytes to the filelike object and returns a dictionary with metadata, necessary to restore the ``numpy.array`` from the file. :param filelike: can be a file or a file-like object that provides the methods ``.write()`` and ``.tell()``. :param array: a 1-dimensional ``numpy.array`` :returns: a metadata dictionary :: {'start': start position in the file, 'end': end position in the file, 'size': size of the array, 'dtype': numpy data type of the array }
def _dumpNdarrayToFile(filelike, ndarray): bytedata = ndarray.tobytes('C') start = filelike.tell() end = start + len(bytedata) metadata = {'start': start, 'end': end, 'size': ndarray.size, 'dtype': ndarray.dtype.name, 'shape': ndarray.shape } filelike.write(bytedata) return metadata
Serializes an N-dimensional ``numpy.array`` to bytes, writes the bytes to the filelike object and returns a dictionary with metadata, necessary to restore the ``numpy.array`` from the file. :param filelike: can be a file or a file-like object that provides the methods ``.write()`` and ``.tell()``. :param ndarray: a N-dimensional ``numpy.array`` :returns: a metadata dictionary :: {'start': start position in the file, 'end': end position in the file, 'size': size of the array, 'dtype': numpy data type of the array, 'shape': description of the array shape }
def loadBinaryItemContainer(zippedfile, jsonHook):
    """Import binaryItems from a zipfile generated by writeBinaryItemContainer.

    Reads the 'metadata' member as JSON (decoded via *jsonHook*) and the
    'binarydata' member as the concatenated raw array bytes, then reattaches
    each restored ``numpy.array`` to its binaryItem under its 'arrayKey'.

    :param zippedfile: a path to a file (str) or a file-like object
    :param jsonHook: custom ``object_hook`` decoding function for the JSON
        formatted binaryItem metadata stored in the zipfile
    :returns: ``{binaryItem.id: binaryItem, ...}``
    """
    binaryItemContainer = dict()
    with zipfile.ZipFile(zippedfile, 'r') as containerZip:
        #Convert the zipfile data into a str object, necessary since
        #containerZip.read() returns a bytes object.
        metadataText = io.TextIOWrapper(containerZip.open('metadata'),
                                        encoding='utf-8'
                                        ).read()
        allMetadata = json.loads(metadataText, object_hook=jsonHook)
        # keys are stringified integers; sort numerically so the reads below
        # follow the same order the binary data was written in
        metadataIndex = [str(_) for _ in sorted([int(i) for i in
                                                 viewkeys(allMetadata)
                                                 ])
                         ]
        binarydataFile = containerZip.open('binarydata')
        for index in metadataIndex:
            binaryItem = allMetadata[index][0]
            for binaryMetadata in allMetadata[index][1]:
                arrayKey = binaryMetadata['arrayKey']
                # sequential reads: each array occupies [start, end) bytes
                rawdata = binarydataFile.read(binaryMetadata['end'] -
                                              binaryMetadata['start']
                                              )
                array = _arrayFromBytes(rawdata, binaryMetadata)
                binaryItem.arrays[arrayKey] = array
            binaryItemContainer[binaryItem.id] = binaryItem
    return binaryItemContainer
Imports binaryItems from a zipfile generated by :func:`writeBinaryItemContainer`. :param zipfile: can be either a path to a file (a string) or a file-like object :param jsonHook: a custom decoding function for JSON formated strings of the binaryItems stored in the zipfile. :returns: a dictionary containing binaryItems ``{binaryItem.id: binaryItem, ... }``
def _arrayFromBytes(dataBytes, metadata):
    """Generate a numpy array from raw data bytes.

    :param dataBytes: raw data bytes as generated by
        ``numpy.ndarray.tobytes()``
    :param metadata: dict containing the dtype name and optionally the shape
        needed to reconstruct the array, e.g.
        ``{"dtype": "float64", "shape": (2, 3)}``
    :returns: ``numpy.array``
    """
    # numpy.fromstring() and numpy.typeDict were removed from numpy;
    # frombuffer() is the supported replacement. The data is copied so the
    # returned array is writable, matching the old fromstring() behavior.
    array = numpy.frombuffer(dataBytes, dtype=numpy.dtype(metadata['dtype'])).copy()
    if 'shape' in metadata:
        array = array.reshape(metadata['shape'])
    return array
Generates and returns a numpy array from raw data bytes. :param bytes: raw data bytes as generated by ``numpy.ndarray.tobytes()`` :param metadata: a dictionary containing the data type and optionally the shape parameter to reconstruct a ``numpy.array`` from the raw data bytes. ``{"dtype": "float64", "shape": (2, 3)}`` :returns: ``numpy.array``
def searchFileLocation(targetFileName, targetFileExtension, rootDirectory, recursive=True):
    """Search for a file with the given name (extension swapped for
    *targetFileExtension*) under *rootDirectory* and return the first match.

    :param targetFileName: filename; only the part before the first "." is
        used, combined with *targetFileExtension*
    :param targetFileExtension: the extension of the file to look for
    :param rootDirectory: directory to search in
    :param recursive: if True, all subdirectories are searched as well
    :returns: a filepath (str) or None
    """
    expectedFileName = targetFileName.split('.')[0] + '.' + targetFileExtension
    if recursive:
        for dirpath, _dirnames, filenames in os.walk(rootDirectory):
            if expectedFileName in filenames:
                return joinpath(dirpath, expectedFileName)
        return None
    # non-recursive: only plain files directly inside rootDirectory
    for entry in os.listdir(rootDirectory):
        entryPath = joinpath(rootDirectory, entry)
        if entry == expectedFileName and os.path.isfile(entryPath):
            return entryPath
    return None
Search for a filename with a specified file extension in all subfolders of specified rootDirectory, returns first matching instance. :param targetFileName: #TODO: docstring :type targetFileName: str :param rootDirectory: #TODO: docstring :type rootDirectory: str :param targetFileExtension: #TODO: docstring :type targetFileExtension: str :param recursive: bool, specify whether subdirectories should be searched :returns: a filepath (str) or None
def matchingFilePaths(targetfilename, directory, targetFileExtension=None, selector=None):
    """Search for files in all subfolders of the specified directory and
    return the file paths of all matching instances.

    :param targetfilename: filename to search for; only the string before
        the last "." is used for matching. Ignored if a selector function
        is specified.
    :param directory: search directory, including all subdirectories
    :param targetFileExtension: if given, the string after the last "." in
        a filename has to be identical. "." characters are ignored, thus
        ".txt" is treated equal to "txt".
    :param selector: a function called with a filename (extension stripped)
        that returns True (include) or False (discard). Defaults to equality
        with *targetfilename*.
    :returns: list of matching file paths (str)
    """
    targetFilePaths = list()
    targetfilename = os.path.splitext(targetfilename)[0]
    # Bug fix: only normalize the extension when one was actually supplied;
    # previously .replace() was called unconditionally, raising
    # AttributeError for the documented default of None.
    matchExtensions = targetFileExtension is not None
    if matchExtensions:
        targetFileExtension = targetFileExtension.replace('.', '')
    if selector is None:
        selector = functools.partial(operator.eq, targetfilename)
    for dirpath, dirnames, filenames in os.walk(directory):
        for filename in filenames:
            filenameNoextension = os.path.splitext(filename)[0]
            if selector(filenameNoextension):
                if matchExtensions:
                    if not filename.endswith('.' + targetFileExtension):
                        continue
                targetFilePaths.append(joinpath(dirpath, filename))
    return targetFilePaths
Search for files in all subfolders of specified directory, return filepaths of all matching instances. :param targetfilename: filename to search for, only the string before the last "." is used for filename matching. Ignored if a selector function is specified. :param directory: search directory, including all subdirectories :param targetFileExtension: string after the last "." in the filename, has to be identical if specified. "." in targetFileExtension are ignored, thus ".txt" is treated equal to "txt". :param selector: a function which is called with the value of targetfilename and has to return True (include value) or False (discard value). If no selector is specified, equality to targetfilename is used. :returns: list of matching file paths (str)
def listFiletypes(targetfilename, directory):
    """Collect all file extensions under which *targetfilename* occurs in
    *directory*. Everything after the first dot counts as the extension:
    ``"filename.txt" -> "txt"``, ``"filename.txt.zip" -> "txt.zip"``.

    :param targetfilename: a filename without any extensions
    :param directory: only files directly inside this directory are compared
        to the targetfilename
    :returns: a list of file extensions (str)
    """
    extensions = []
    for entry in os.listdir(directory):
        if not os.path.isfile(joinpath(directory, entry)):
            continue
        parts = entry.split('.')
        if parts[0] == targetfilename:
            extensions.append('.'.join(parts[1:]))
    return extensions
Looks for all occurrences of a specified filename in a directory and returns a list of all present file extensions of this filename. In this case everything after the first dot is considered to be the file extension: ``"filename.txt" -> "txt"``, ``"filename.txt.zip" -> "txt.zip"`` :param targetfilename: a filename without any extensions :param directory: only files present in this directory are compared to the targetfilename :returns: a list of file extensions (str)
def findAllSubstrings(string, substring):
    """Return a list of all start positions of *substring* in *string*,
    including overlapping occurrences; empty list when absent.

    :param string: the template string
    :param substring: the string looked for in *string*
    :returns: list of starting positions (int)
    """
    positions = []
    index = string.find(substring)
    while index != -1:
        positions.append(index)
        # advance by one (not len(substring)) so overlapping matches count
        index = string.find(substring, index + 1)
    return positions
Returns a list of all substring starting positions in string or an empty list if substring is not present in string. :param string: a template string :param substring: a string, which is looked for in the ``string`` parameter. :returns: a list of substring starting positions in the template string
def toList(variable, types=(basestring, int, float, )):
    """Package a scalar (string, int or float) into a one-element list;
    any other object is passed through unchanged.

    :param variable: any python object
    :returns: ``[variable]`` if *variable* is an instance of *types*,
        otherwise *variable* unchanged
    """
    return [variable] if isinstance(variable, types) else variable
Converts a variable of type string, int, float to a list, containing the variable as the only element. :param variable: any python object :type variable: (str, int, float, others) :returns: [variable] or variable
def calcDeviationLimits(value, tolerance, mode):
    """Return the lower and upper deviation limits for a value and a given
    tolerance, interpreted either as a relative or an absolute difference.

    :param value: a single value or a list of values; for a list, the
        minimum is used for the lower limit and the maximum for the upper
    :param tolerance: a number used to calculate the limits
    :param mode: either "absolute" or "relative"
    :returns: (lowerLimit, upperLimit)
    :raises ValueError: if *mode* is neither "absolute" nor "relative"
    """
    values = toList(value)
    if mode == 'relative':
        lowerLimit = min(values) * (1 - tolerance)
        upperLimit = max(values) * (1 + tolerance)
    elif mode == 'absolute':
        lowerLimit = min(values) - tolerance
        upperLimit = max(values) + tolerance
    else:
        # Bug fix: the original message interpolated an undefined variable
        # "filepath", so this branch raised NameError instead of the
        # intended error. ValueError is a subclass of Exception, so callers
        # catching the old Exception still work.
        raise ValueError('mode %s not specified' % (mode, ))
    return lowerLimit, upperLimit
Returns the upper and lower deviation limits for a value and a given tolerance, either as a relative or an absolute difference. :param value: can be a single value or a list of values; if a list of values is given, the minimal value will be used to calculate the lower limit and the maximum value to calculate the upper limit :param tolerance: a number used to calculate the limits :param mode: either ``absolute`` or ``relative``, specifies how the ``tolerance`` should be applied to the ``value``.
def returnArrayFilters(arr1, arr2, limitsArr1, limitsArr2):
    """Locate the slice of the sorted array *arr1* inside *limitsArr1* and
    build a boolean mask selecting entries of *arr2* (within that slice)
    that fall inside *limitsArr2*.

    :param arr1: sorted ``numpy.array`` used for the positional limits
    :param arr2: ``numpy.array`` aligned with *arr1*
    :param limitsArr1: (lower, upper) limits applied to *arr1*
    :param limitsArr2: (lower, upper) limits applied to *arr2*
    :returns: (posL, posR, matchMask)
    """
    posL = bisect.bisect_left(arr1, limitsArr1[0])
    posR = bisect.bisect_right(arr1, limitsArr1[1])
    window = arr2[posL:posR]
    matchMask = (window >= limitsArr2[0]) & (window <= limitsArr2[1])
    return posL, posR, matchMask
#TODO: docstring :param arr1: #TODO: docstring :param arr2: #TODO: docstring :param limitsArr1: #TODO: docstring :param limitsArr2: #TODO: docstring :returns: #TODO: docstring
def applyArrayFilters(array, posL, posR, matchMask):
    """Slice *array* to ``[posL:posR]`` and keep only the entries where
    *matchMask* is True.

    :param array: ``numpy.array`` to filter
    :param posL: left slice boundary (inclusive)
    :param posR: right slice boundary (exclusive)
    :param matchMask: boolean mask aligned with the slice
    :returns: ``numpy.array``, a subset of the input *array*
    """
    windowed = array[posL:posR]
    return numpy.compress(matchMask, windowed, axis=0)
#TODO: docstring :param array: #TODO: docstring :param posL: #TODO: docstring :param posR: #TODO: docstring :param matchMask: #TODO: docstring :returns: ``numpy.array``, a subset of the input ``array``.
def averagingData(array, windowSize=None, averagingType='median'):
    """Smooth *array* with a running median or running mean.

    :param array: the input values
    :param windowSize: size of the sliding window; defaults to 2% of the
        array length, but never less than 100 points
    :param averagingType: "median" or "mean"
    :returns: the smoothed values
    """
    assert averagingType in ['median', 'mean']
    if windowSize is None:
        suggested = int(len(array) / 50)
        windowSize = suggested if suggested > 100 else 100
    if averagingType == 'median':
        return runningMedian(array, windowSize)
    return runningMean(array, len(array), windowSize)
#TODO: docstring :param array: #TODO: docstring :param windowSize: #TODO: docstring :param averagingType: "median" or "mean" :returns: #TODO: docstring
def open(self, filepath, mode='w+b'):
    """Open *filepath* for writing — actually yields a temporary file that
    is recorded so it can replace the original file later (presumably when
    the manager's context closes — TODO confirm against the class docs).

    :param filepath: path of the file to (eventually) replace
    :param mode: mode passed to the file constructor
    :raises IOError: if *filepath* is not writable
    """
    #Check if the filepath can be accessed and is writable before creating
    #the tempfile
    if not _isFileAccessible(filepath):
        raise IOError('File %s is not writable' % (filepath,))
    if filepath in self._files:
        # a tempfile was already created for this path: reopen it
        with open(self._files[filepath], mode=mode) as tmpf:
            yield tmpf
    else:
        tempfilepath = None
        # delete=False keeps the tempfile on disk after it is closed
        with tempfile.NamedTemporaryFile(delete=False, mode=mode) as tmpf:
            tempfilepath = tmpf.name
            yield tmpf
        # remember which tempfile holds the pending content for filepath
        self._files[filepath] = tempfilepath
Opens a file - will actually return a temporary file but replace the original file when the context is closed.
def default(self, obj):
    """Serialize *obj* via its ``_reprJSON()`` method when it defines one.

    :returns: ``obj._reprJSON()`` if defined, else the result of
        ``json.JSONEncoder.default`` (which raises TypeError)
    """
    reprMethod = getattr(obj, '_reprJSON', None)
    if reprMethod is not None:
        return reprMethod()
    #Let the base class default method raise the TypeError
    return json.JSONEncoder.default(self, obj)
:returns: obj._reprJSON() if it is defined, else json.JSONEncoder.default(obj)
def processInput(self, dataAveraging=False, windowSize=None):
    """Convert the dependent/independent input variables into float64
    numpy arrays sorted by the independent variable, optionally smoothing
    the dependent variable.

    :param dataAveraging: False to skip smoothing, otherwise "median" or
        "mean" (passed to :func:`averagingData` as the averaging type)
    :param windowSize: smoothing window size, see :func:`averagingData`
    """
    self.dependentVar = numpy.array(self.dependentVarInput,
                                    dtype=numpy.float64
                                    )
    self.independentVar = numpy.array(self.independentVarInput,
                                      dtype=numpy.float64
                                      )
    # sort both arrays by the independent variable
    sortMask = self.independentVar.argsort()
    self.dependentVar = self.dependentVar[sortMask]
    self.independentVar = self.independentVar[sortMask]

    if dataAveraging:
        averagedData = numpy.array(
            averagingData(self.dependentVar, windowSize=windowSize,
                          averagingType=dataAveraging),
            dtype=numpy.float64
        )
        # Smoothing shortens the array; trim the independent variable
        # symmetrically. Bug fix: the trim counts are now ints
        # (numpy.floor returns a float, which is not a valid slice index)
        # and a zero high-side trim no longer produces an empty slice
        # (the old "[low:-0]" slice is "[low:0]", i.e. empty).
        missingTotal = self.independentVar.size - averagedData.size
        missingNumHigh = missingTotal // 2
        missingNumLow = missingTotal - missingNumHigh
        self.dependentVar = averagedData
        end = self.independentVar.size - missingNumHigh
        self.independentVar = self.independentVar[missingNumLow:end]
#TODO: docstring :param dataAveraging: #TODO: docstring :param windowSize: #TODO: docstring
def generateSplines(self):
    """Fit a set of smoothing splines to the processed data.

    Calls :func:`returnSplineList` with the spline parameters stored on the
    instance and stores the resulting list of splines in ``self.splines``.
    """
    _ = returnSplineList(self.dependentVar, self.independentVar,
                         subsetPercentage=self.splineSubsetPercentage,
                         cycles=self.splineCycles,
                         minKnotPoints=self.splineMinKnotPoins,
                         initialKnots=self.splineInitialKnots,
                         splineOrder=self.splineOrder,
                         terminalExpansion=self.splineTerminalExpansion
                         )
    self.splines = _
#TODO: docstring
def corrArray(self, inputArray):
    """Evaluate every fitted spline at *inputArray* and average the results.

    NaN values produced by a spline are replaced with zero before averaging.

    :param inputArray: ``numpy.array`` of positions to evaluate
    :returns: ``numpy.array`` of the per-position mean over all splines
    """
    splineValues = [numpy.nan_to_num(spline(inputArray))
                    for spline in self.splines]
    return numpy.vstack(splineValues).mean(axis=0)
#TODO: docstring :param inputArray: #TODO: docstring :returns: #TODO docstring
def fixminimized(self, alphabet):
    """Restore completeness after pyfst minimization by adding a sink
    state: every missing (state, char) transition is redirected to a new
    non-final state that loops to itself on every symbol.

    Args:
        alphabet (list): The input alphabet
    Returns:
        None
    """
    # id of the new sink state (appended after all existing states)
    endstate = len(list(self.states))
    for state in self.states:
        for char in alphabet:
            # check whether this state already has an arc for char
            found = 0
            for arc in state.arcs:
                if self.isyms.find(arc.ilabel) == char:
                    found = 1
                    break
            if found == 0:
                self.add_arc(state.stateid, endstate, char)
    # infinite tropical weight marks the sink state as non-final
    self[endstate].final = TropicalWeight(float('inf'))
    # the sink state loops to itself on every symbol
    for char in alphabet:
        self.add_arc(endstate, endstate, char)
After pyfst minimization, all unused arcs are removed, and all sink states are removed. However this may break compatibility. Args: alphabet (list): The input alphabet Returns: None
def _path_to_str(self, path):
    """Concatenate the input labels of the arcs in *path* into the string
    the path spells, skipping epsilon transitions.

    Args:
        path (tuple): A tuple of arcs
    Returns:
        inp (str): The path concatenated as a string
    """
    symbols = []
    for arc in path:
        label = self.isyms.find(arc.ilabel)
        # epsilon transitions carry no input symbol
        if label != fst.EPSILON:
            symbols.append(label)
    return ''.join(symbols)
Convert a path to the string representing the path Args: path (tuple): A tuple of arcs Returns: inp (str): The path concatenated as as string
def init_from_acceptor(self, acceptor):
    """Initialize this automaton from *acceptor*: copy every arc whose
    input symbol is in ``self.alphabet`` and preserve the initial/final
    flags of the copied states.

    Args:
        acceptor: the automaton to copy states and transitions from
    Returns:
        None
    """
    # visit initial states first
    states = sorted(
        acceptor.states,
        key=attrgetter('initial'),
        reverse=True)
    for state in states:
        for arc in state.arcs:
            itext = acceptor.isyms.find(arc.ilabel)
            # only keep transitions over our own alphabet
            if itext in self.alphabet:
                self.add_arc(state.stateid, arc.nextstate, itext)
        if state.final:
            self[state.stateid].final = True
        if state.initial:
            self[state.stateid].initial = True
Initializes this automaton from an acceptor: copies every arc whose input symbol is in the alphabet and preserves the initial and final flags of the copied states. Args: acceptor: The automaton to copy states and transitions from Returns: None
def consume_input(self, inp):
    """Feed *inp* to the automaton and report acceptance.

    Args:
        inp (str): input string to be consumed
    Returns:
        bool: True if the DFA accepts the input, False otherwise
    """
    # the initial state sorts first when ordering by the 'initial' flag
    cur_state = sorted(
        self.states,
        key=attrgetter('initial'),
        reverse=True)[0]
    for symbol in inp:
        for arc in cur_state.arcs:
            if self.isyms.find(arc.ilabel) == symbol:
                cur_state = self[arc.nextstate]
                break
        else:
            # no transition for this symbol: reject
            return False
    # a finite tropical weight marks a final state
    return cur_state.final != TropicalWeight(float('inf'))
Return True/False if the machine accepts/reject the input. Args: inp (str): input string to be consumed Returns: bool: A true or false value depending on if the DFA accepts the provided input
def random_strings(self, string_length=1):
    """Generate random strings of the given length that belong to the
    automaton.

    Args:
        string_length (integer): The size of each random string
    Returns:
        list: The generated strings
    """
    return [self._path_to_str(path)
            for path in self.uniform_generate(string_length)]
Generate random strings of length string_length that belong to the automaton. Args: string_length (integer): The size of each random string Returns: list: The generated strings
def save(self, txt_fst_filename):
    """Save the machine in the OpenFST text format in the file denoted by
    *txt_fst_filename*. Arc lines hold "src dest ilabel olabel" with
    hex-encoded labels; a line with a single state id marks a final state.

    Args:
        txt_fst_filename (str): The name of the file
    Returns:
        None
    """
    txt_fst = open(txt_fst_filename, 'w+')
    # initial states first
    states = sorted(self.states, key=attrgetter('initial'), reverse=True)
    for state in states:
        for arc in state.arcs:
            itext = self.isyms.find(arc.ilabel)
            # NOTE(review): looks up osyms with arc.ilabel rather than
            # arc.olabel — presumably fine for acceptors where input equals
            # output; verify for true transducers.
            otext = self.osyms.find(arc.ilabel)
            # str.encode('hex') is Python-2-only
            txt_fst.write(
                '{}\t{}\t{}\t{}\n'.format(
                    state.stateid,
                    arc.nextstate,
                    itext.encode('hex'),
                    otext.encode('hex')))
        if state.final:
            txt_fst.write('{}\n'.format(state.stateid))
    txt_fst.close()
Save the machine in the openFST format in the file denoted by txt_fst_filename. Args: txt_fst_filename (str): The name of the file Returns: None
def load(self, txt_fst_filename):
    """Load the machine from a text file in the OpenFST format, as written
    by :meth:`save`: arc lines hold "src dest ilabel ..."; a line with a
    single field marks a final state.

    Args:
        txt_fst_filename (string): The name of the file
    Returns:
        None
    """
    with open(txt_fst_filename, 'r') as txt_fst:
        for line in txt_fst:
            line = line.strip()
            splitted_line = line.split()
            if len(splitted_line) == 1:
                # a lone state id marks a final state
                self[int(splitted_line[0])].final = True
            else:
                # str.decode('hex') is Python-2-only
                self.add_arc(int(splitted_line[0]), int(
                    splitted_line[1]), splitted_line[2].decode('hex'))
Load the transducer from the text file format of OpenFST. The format is specified as follows: arc format: src dest ilabel olabel [weight] final state format: state [weight] lines may occur in any order except initial state must be first line Args: txt_fst_filename (string): The name of the file Returns: None
def persistent_menu(menu):
    """Validate a Messenger persistent-menu definition against the
    platform limits.

    more: https://developers.facebook.com/docs/messenger-platform/thread-settings/persistent-menu

    :param menu: list of call-to-action items
    :raises Invalid: when any platform limit is exceeded
    :return:
    """
    if len(menu) > 3:
        raise Invalid('menu should not exceed 3 call to actions')
    nested_items = (item for item in menu if item['type'] == 'nested')
    if any(len(item['call_to_actions']) > 5 for item in nested_items):
        raise Invalid('call_to_actions is limited to 5 for sub-levels')
    for item in menu:
        if len(item['title']) > 30:
            raise Invalid('menu item title should not exceed 30 characters')
        is_postback = item['type'] == 'postback'
        if is_postback and len(item['payload']) > 1000:
            raise Invalid('menu item payload should not exceed 1000 characters')
more: https://developers.facebook.com/docs/messenger-platform/thread-settings/persistent-menu :param menu: :return:
def send_text_message(text, quick_replies):
    """Validate a Messenger text message and its optional quick replies.

    more: https://developers.facebook.com/docs/messenger-platform/send-api-reference/text-message
    and https://developers.facebook.com/docs/messenger-platform/send-api-reference/quick-replies

    :param text: message text, at most 640 characters
    :param quick_replies: optional list of quick-reply dicts
    :return:
    """
    if len(text) > 640:
        raise ExceedLengthException(
            'send message text should not exceed 640 character limit',
            limit=640,
        )
    if not isinstance(quick_replies, list):
        return
    if len(quick_replies) > 10:
        raise Invalid('send message quick replies should not exceed 10 limit')
    for item in quick_replies:
        if 'content_type' not in item:
            raise Invalid('send message quick replies should have content_type')
        if item['content_type'] != 'text':
            continue
        if len(item['title']) > 20:
            raise Invalid('send message quick replies title should not exceed 20 character limit')
        if len(item['payload']) > 1000:
            raise Invalid('send message quick replies payload should not exceed 1000 character limit')
more: https://developers.facebook.com/docs/messenger-platform/send-api-reference/text-message and https://developers.facebook.com/docs/messenger-platform/send-api-reference/quick-replies :param text: :param quick_replies: :return:
def refactor(self, symbol, value):
    """Add *symbol* to the predicate set when *value* is truthy, otherwise
    remove it.

    Args:
        symbol: the symbol to add or remove
        value: truthy to add, falsy to remove
    Returns:
        None
    """
    target = self.pset
    if value:
        target.add(symbol)
    else:
        target.remove(symbol)
Args: symbol: value: Returns: None
def add_state(self):
    """Append a new SFA state; its id is the current number of states."""
    sid = len(self.states)
    self.states.append(SFAState(sid))
This function adds a new state
def add_arc(self, src, dst, char):
    """Add a new arc between two SFA states, creating any missing states
    on demand.

    Args:
        src (int): The source state identifier
        dst (int): The destination state identifier
        char (str): The transition symbol
    Returns:
        None
    """
    # isinstance() is the idiomatic type check (the original compared
    # type() objects directly)
    assert isinstance(src, int) and isinstance(dst, int), \
        "State type should be integer."
    # grow the state list until both endpoints exist
    while src >= len(self.states) or dst >= len(self.states):
        self.add_state()
    self.states[src].arcs.append(SFAArc(src, dst, char))
This function adds a new arc in a SFA state Args: src (int): The source state identifier dst (int): The destination state identifier char (str): The transition symbol Returns: None
def consume_input(self, inp):
    """Run the SFA on *inp* and report acceptance.

    Args:
        inp (str): input string to be consumed
    Returns:
        bool: True if the SFA accepts the input, False otherwise
    Raises:
        RuntimeError: if no guard matches a character (incomplete SFA)
    """
    cur_state = self.states[0]
    for character in inp:
        for arc in cur_state.arcs:
            if arc.guard.is_sat(character):
                cur_state = self.states[arc.dst_state]
                break
        else:
            raise RuntimeError('SFA not complete')
    return cur_state.final
Return True/False if the machine accepts/rejects the input. Args: inp (str): input string to be consumed Returns: bool: A true or false value depending on if the SFA accepts the provided input
def concretize(self):
    """Transform the SFA into an equivalent DFA by expanding every guard
    into one concrete arc per satisfying character.

    Args:
        None
    Returns:
        DFA: The generated DFA
    """
    dfa = DFA(self.alphabet)
    for state in self.states:
        for arc in state.arcs:
            for char in arc.guard:
                dfa.add_arc(arc.src_state, arc.dst_state, char)
    # Portability fix: xrange is Python-2-only; enumerate also removes the
    # manual index arithmetic while behaving identically.
    for sid, state in enumerate(self.states):
        if state.final:
            dfa[sid].final = True
    return dfa
Transforms the SFA into a DFA Args: None Returns: DFA: The generated DFA
def _write(self, ret):
    """Persist a salt job return into redis, mirroring the key layout of
    salt's redis returner:
    https://github.com/saltstack/salt/blob/develop/salt/returners/redis_return.py#L88

    :param ret: job return dict with at least the keys 'id' (minion id),
        'jid' (job id) and 'fun' (function name)
    """
    # full return payload keyed by "<minion id>:<jid>"
    self.redis.set('{0}:{1}'.format(ret['id'], ret['jid']), json.dumps(ret))
    # per-minion, per-function history of job ids
    self.redis.lpush('{0}:{1}'.format(ret['id'], ret['fun']), ret['jid'])
    # global sets of all seen minions and job ids
    self.redis.sadd('minions', ret['id'])
    self.redis.sadd('jids', ret['jid'])
This function needs to correspond to this: https://github.com/saltstack/salt/blob/develop/salt/returners/redis_return.py#L88
def _initAddons(cls, recurse=True):
    """Initializes the addons for this manager by importing every
    registered addon module.

    :param recurse | <bool> | also use modules registered on base classes
    """
    for addon_module in cls.addonModules(recurse):
        projex.importmodules(addon_module)
Initializes the addons for this manager.
def addons(cls, recurse=True):
    """Returns a dictionary containing all the available addons for this
    mixin class. If *recurse* is True, all base classes are searched as
    well; addons defined on this class override equally-named base addons.

    :param recurse | <bool>

    :return {<str> name: <variant> addon, ..}
    """
    cls.initAddons()
    prop = '_{0}__addons'.format(cls.__name__)
    collected = {}
    # start with the base classes so this class's entries win on conflicts
    if recurse:
        for base in cls.__bases__:
            if issubclass(base, AddonManager):
                collected.update(base.addons(recurse))
    collected.update(getattr(cls, prop, {}))
    return collected
Returns a dictionary containing all the available addons for this mixin class. If the optional recurse flag is set to True, then all the base classes will be searched for the given addon as well. :param recurse | <bool> :return {<str> name: <variant> addon, ..}
def addonModules(cls, recurse=True):
    """Returns all the modules that this addon class uses to load plugins
    from, including those registered on base classes when *recurse* is True.

    :param recurse | <bool>

    :return [<str> || <module>, ..]
    """
    prop = '_{0}__addon_modules'.format(cls.__name__)
    collected = set()
    # gather modules registered on the bases first
    if recurse:
        for base in cls.__bases__:
            if issubclass(base, AddonManager):
                collected.update(base.addonModules(recurse))
    collected.update(getattr(cls, prop, set()))
    return collected
Returns all the modules that this addon class uses to load plugins from. :param recurse | <bool> :return [<str> || <module>, ..]
def byName(cls, name, recurse=True, default=None):
    """Returns the addon whose name matches the inputted name. If the
    optional recurse flag is set to True, all base classes are searched
    for the given addon as well. If no addon is found, the default is
    returned.

    :param name | <str>
           recurse | <bool>
           default | <variant>
    """
    cls.initAddons()
    prop = '_{0}__addons'.format(cls.__name__)
    try:
        return getattr(cls, prop, {})[name]
    except KeyError:
        if recurse:
            for base in cls.__bases__:
                if issubclass(base, AddonManager):
                    # Bug fix: search every AddonManager base instead of
                    # returning whatever the first one yields, so a miss in
                    # one base no longer hides a match in another and the
                    # *default* is honored when nothing is found.
                    found = base.byName(name, recurse)
                    if found is not None:
                        return found
    return default
Returns the addon whose name matches the inputted name. If the optional recurse flag is set to True, then all the base classes will be searched for the given addon as well. If no addon is found, the default is returned. :param name | <str> recurse | <bool> default | <variant>
def initAddons(cls, recurse=True):
    """Loads the addon modules for this class exactly once. This method
    should not be overloaded in a subclass as it manages the loaded state
    to avoid duplicate loads; re-implement :meth:`_initAddons` for custom
    loading instead.

    :param recurse | <bool>
    """
    key = '_{0}__addons_loaded'.format(cls.__name__)
    if not getattr(cls, key, False):
        cls._initAddons(recurse)
        setattr(cls, key, True)
Loads different addon modules for this class. This method should not be overloaded in a subclass as it also manages the loaded state to avoid duplicate loads. Instead, you can re-implement the _initAddons method for custom loading. :param recurse | <bool>
def registerAddon(cls, name, addon, force=False):
    """Registers the inputted addon to the class.

    :param name | <str>
           addon | <variant>
           force | <bool> | overwrite an existing addon with the same name
    """
    prop = '_{0}__addons'.format(cls.__name__)
    cmds = getattr(cls, prop, {})
    if name in cmds and not force:
        raise errors.AddonAlreadyExists(cls, name, addon)
    cmds[name] = addon
    try:
        # when the addon is itself a subclass of this manager, remember the
        # name it was registered under
        if issubclass(addon, cls):
            setattr(addon, '_{0}__addonName'.format(addon.__name__), name)
    # issubclass raises TypeError for non-class addons
    # (StandardError is Python-2-only)
    except StandardError:
        pass
    setattr(cls, prop, cmds)
Registers the inputted addon to the class. :param name | <str> addon | <variant>
def registerAddonModule(cls, module):
    """Registers a module to use to import addon subclasses from.

    :param module | <str> || <module>
    """
    prop = '_{0}__addon_modules'.format(cls.__name__)
    modules = getattr(cls, prop, set())
    modules.add(module)
    setattr(cls, prop, modules)
Registers a module to use to import addon subclasses from. :param module | <str> || <module>
def unregisterAddon(cls, name):
    """Unregisters the addon defined by the given name from the class;
    unknown names are silently ignored.

    :param name | <str>
    """
    prop = '_{0}__addons'.format(cls.__name__)
    registered = getattr(cls, prop, {})
    registered.pop(name, None)
Unregisters the addon defined by the given name from the class. :param name | <str>
def unregisterAddonModule(cls, module):
    """Unregisters the module to use to import addon subclasses from;
    a module that was never registered is silently ignored.

    :param module | <str> || <module>
    """
    prop = '_{0}__addon_modules'.format(cls.__name__)
    mods = getattr(cls, prop, set())
    # set.discard is the idiomatic "remove if present" (the original
    # wrapped set.remove in try/except KeyError)
    mods.discard(module)
Unregisters the module to use to import addon subclasses from. :param module | <str> || <module>
def emit(self, record):
    """Throws an error based on the information that the logger reported,
    given the logging level.

    :param record: <logging.LogRecord>
    """
    # honor the global "don't raise from logging" switch
    if not logging.raiseExceptions:
        return

    logger = logging.getLogger(record.name)

    # raise an exception based on the error logging
    if logger.level <= record.levelno:
        # record.msg is expected to hold the exception as its first item;
        # anything else is wrapped in a ProjexError
        err = record.msg[0]
        if not isinstance(err, Exception):
            err = ProjexError(nstr(record.msg))

        # log the traceback info
        data = record.__dict__.copy()
        data['type'] = type(err).__name__
        msg = ERROR_MESSAGE % data
        sys.stderr.write(msg)

        raise err
Throws an error based on the information that the logger reported, given the logging level. :param record: <logging.LogRecord>
def cli(ctx, stage):
    """Listen to push requests for src and pull requests from target
    (experimental).

    Validates the bubble configuration for *stage*, resolves the configured
    SOURCE server and starts listening with a simple logging handler.
    """
    if not ctx.bubble:
        ctx.say_yellow('There is no bubble present, will not listen')
        raise click.Abort()

    SRC = None
    if stage in STAGES:
        try:
            SRC = ctx.cfg.CFG[stage].SOURCE
        except KeyError:
            pass
    if not SRC:
        ctx.say_red('There is no SOURCE in stage:' + stage)
        ctx.say_yellow('please check configuration in ' +
                       ctx.home + '/config/config.yaml')
        raise click.Abort()
    if 'SERVER' not in SRC:
        ctx.say_red('There is no SOURCE.SERVER in stage:' + stage)
        raise click.Abort()

    src_server = get_server(SRC.SERVER, ctx.home)

    # connect storage / pipeline to target via transform
    # write state listening on port etc into
    def message_handler(**m):
        # Bug fix: arrow.now is a function; the original printed the
        # function object instead of the current timestamp.
        print(str(arrow.now()), str(m))
        return True, 'handled'

    try:
        # TODO: run the listener in the background
        src_server.listen(cfg=SRC,
                          push_handler=message_handler,
                          pull_handler=message_handler)
    except Exception as e:
        ctx.say_red(
            'cannot listen from source client bubble.clients.' + SRC.SERVER)
        ctx.say_red(str(e))
        raise click.Abort('cannot listen')
listen to push requests for src and pull requests from target (experimental)
def get_field_label_css_class(self, bound_field):
    """Returns 'form-check-label' if the widget is a CheckboxInput; for all
    other fields, defers to the parent implementation.
    """
    widget = bound_field.field.widget
    # Bootstrap requires a dedicated label css class for checkboxes
    if isinstance(widget, forms.CheckboxInput):
        return 'form-check-label'
    return super().get_field_label_css_class(bound_field)
Returns 'form-check-label' if widget is CheckboxInput. For all other fields, no css class is added.
def get_widget_css_class(self, field_name, field):
    """Returns 'form-check-input' if the widget is a CheckboxInput, or
    'form-control-file' if it is a FileInput. For all other fields, the
    default value from the form property is returned ("form-control").
    """
    widget = field.widget
    # Bootstrap uses dedicated css classes for checkboxes and file inputs
    if isinstance(widget, forms.CheckboxInput):
        return 'form-check-input'
    if isinstance(widget, forms.FileInput):
        return 'form-control-file'
    return super().get_widget_css_class(field_name, field)
Returns 'form-check-input' if widget is CheckboxInput or 'form-control-file' if widget is FileInput. For all other fields return the default value from the form property ("form-control").
def handle(self):
    """Handle one message for this stream component.

    SOURCE components generate a message via the handler function and send
    it. Other components receive a message, pass the decoded text to the
    handler function, and PROCESSOR components forward the result
    downstream.

    :return: True if success, False otherwise
    """
    # sources only produce: generate and send
    if self.component_type == StreamComponent.SOURCE:
        msg = self.handler_function()
        return self.__send(msg)

    logger = self.logger
    data = self.__receive()
    if data is None:
        # nothing received: signal failure
        return False
    else:
        logger.debug("Calling %s " % self.handler_function)
        result = self.handler_function(data.decode(self.char_encoding))
        if self.component_type == StreamComponent.PROCESSOR:
            logger.debug("Sending p3:%s %s %s" % (PYTHON3, result, str(type(result))))
            if not self.__send(result):
                return False
    return True
Handle a message :return: True if success, False otherwise
def realpath_with_context(path, context):
    """Convert a path into its realpath:

    * For relative path: use :attr:`context.workdir` as root directory
    * For absolute path: Pass-through without any changes.

    :param path: Filepath to convert (as string).
    :param context: Behave context object (with :attr:`context.workdir`)
    :return: Converted path.
    """
    if os.path.isabs(path):
        return path
    # XXX ensure_workdir_exists(context)
    assert context.workdir
    return os.path.join(context.workdir, os.path.normpath(path))
Convert a path into its realpath: * For relative path: use :attr:`context.workdir` as root directory * For absolute path: Pass-through without any changes. :param path: Filepath to convert (as string). :param context: Behave context object (with :attr:`context.workdir`) :return: Converted path.
def posixpath_normpath(pathname):
    """Convert path into POSIX path:

    * Normalize path
    * Replace backslash with slash

    :param pathname: Pathname (as string)
    :return: Normalized POSIX path.
    """
    normalized = os.path.normpath(pathname) or "."
    # replace() is a no-op when no backslash is present
    return normalized.replace('\\', '/')
Convert path into POSIX path: * Normalize path * Replace backslash with slash :param pathname: Pathname (as string) :return: Normalized POSIX path.
def create_textfile_with_contents(filename, contents, encoding='utf-8'):
    """Creates a textual file with the provided contents in the workdir.
    Overwrites an existing file. A trailing newline is appended when the
    contents do not already end with one.

    :param filename: path of the file to create
    :param contents: text to write
    :param encoding: text encoding of the file (default: utf-8)
    """
    # Bug fix: only create parent directories when the path actually has
    # one (os.path.dirname returns "" for a bare filename, which the
    # directory-creation helper cannot handle).
    dirname = os.path.dirname(filename)
    if dirname:
        ensure_directory_exists(dirname)
    if os.path.exists(filename):
        os.remove(filename)
    # the context manager guarantees the stream is flushed and closed
    with codecs.open(filename, "w", encoding) as outstream:
        outstream.write(contents)
        if contents and not contents.endswith("\n"):
            outstream.write("\n")
    assert os.path.exists(filename), "ENSURE file exists: %s" % filename
Creates a textual file with the provided contents in the workdir. Overwrites an existing file.
def ensure_directory_exists(dirname, context=None):
    """Ensures that a directory exits. If it does not exist, it is
    automatically created (including intermediate directories).

    :param dirname: directory path; resolved against context.workdir when a
        behave *context* is given
    :param context: optional behave context for relative-path resolution
    """
    real_dirname = dirname
    if context:
        real_dirname = realpath_with_context(dirname, context)
    if not os.path.exists(real_dirname):
        try:
            os.makedirs(real_dirname)
        except OSError:
            # Robustness fix: tolerate the race where another process
            # created the directory between the exists() check and
            # makedirs(); re-raise any other failure.
            if not os.path.isdir(real_dirname):
                raise
    assert os.path.exists(real_dirname), "ENSURE dir exists: %s" % dirname
    assert os.path.isdir(real_dirname), "ENSURE isa dir: %s" % dirname
Ensures that a directory exits. If it does not exist, it is automatically created.
def p_andnode_expression(self, t):
    '''andnode_expression : LB identlist RB '''
    # Repaired: the original text contained a garbled duplicate of this
    # function fused into its last statement; this is the clean form.
    # Register an "and" vertex for the identifier list and propagate its
    # label as the value of this grammar rule.
    self.accu.add(Term('vertex', ["and(\""+t[2]+"\")"]))
    t[0] = "and(\""+t[2]+"\")"
andnode_expression : LB identlist RB
def p_identlist(self, t):
    '''identlist : IDENT
                 | NOT IDENT
                 | IDENT AND identlist
                 | NOT IDENT AND identlist
    '''
    # Repaired: the original text contained a garbled duplicate of this
    # function fused into its body; this is the clean form.
    # Concatenate all matched tokens into a single string value.
    if len(t) == 5:
        t[0] = t[1] + t[2] + t[3] + t[4]
    elif len(t) == 4:
        t[0] = t[1] + t[2] + t[3]
    elif len(t) == 3:
        t[0] = t[1] + t[2]
    elif len(t) == 2:
        t[0] = t[1]
    else:
        print("Syntax error at '", str(t), "'")
identlist : IDENT | NOT IDENT | IDENT AND identlist | NOT IDENT AND identlist
def deserialize(self, msg):
    """Deserialize output to a Python object.

    :param msg: a JSON document (str/bytes)
    :returns: the decoded Python object
    """
    # Repaired: the original text contained a garbled duplicate of this
    # function fused into its last statement; this is the clean form.
    self.logger.debug('deserializing %s', msg)
    return json.loads(msg)
deserialize output to a Python object
def append_request_id(req, resp, resource, params):
    """Append the request id obtained from the response header to the
    ``resource.req_ids`` list (creating the list when needed and skipping
    duplicates).
    """
    def _headers_of(response):
        # Response-like objects expose .headers; some keep ._headers
        if hasattr(response, 'headers'):
            return response.headers
        if hasattr(response, '_headers'):
            return response._headers
        return None

    headers = _headers_of(resp)
    if isinstance(resp, Response) or (headers is not None):
        # Extract 'x-request-id' from the headers when resp is a
        # Response object.
        request_id = _headers_of(resp).get('x-request-id')
    else:
        # resp is of type string or None.
        request_id = resp
    if resource.req_ids is None:
        resource.req_ids = []
    if request_id not in resource.req_ids:
        resource.req_ids.append(request_id)
Append request id which got from response header to resource.req_ids list.