def is_waiting_for_input(self):
    """Return True if execution is waiting for user input and could make one step further.

    :return:
    """
    return self.waiting_for and \
        not isinstance(self.waiting_for, forking.SwitchOnValue) and \
        not is_base_type(self.waiting_for)
def alias(self):
    """If the _alias cache is None, just build the alias from the item name."""
    if self._alias is None:
        if self.name in self.aliases_fix:
            self._alias = self.aliases_fix[self.name]
        else:
            self._alias = self.name.lower() \
                .replace(' ', '-') \
                .replace('(', '') \
                .replace(')', '')
    return self._alias
def load_configs(self, conf_file):
    """Assumes that the config file does not have any sections, so throw it all in global."""
    with open(conf_file) as stream:
        lines = itertools.chain(("[global]",), stream)
        self._config.read_file(lines)
    return self._config['global']
def remove_quotes(self, configs):
    """Strip single quotes, because some values are wrapped in them."""
    for key in configs:
        value = configs[key]
        if value[0] == "'" and value[-1] == "'":
            configs[key] = value[1:-1]
    return configs
def multikey_sort(items, columns):
    """Sort a list of dicts by multiple keys; prefix a column with '-' for descending order.

    Source: https://stackoverflow.com/questions/1143671/python-sorting-list-of-dictionaries-by-multiple-keys
    """
    comparers = [
        ((itemgetter(col[1:].strip()), -1) if col.startswith('-')
         else (itemgetter(col.strip()), 1))
        for col in columns
    ]

    def cmp(a, b):
        return (a > b) - (a < b)

    def comparer(left, right):
        comparer_iter = (
            cmp(fn(left), fn(right)) * mult
            for fn, mult in comparers
        )
        return next((result for result in comparer_iter if result), 0)

    return sorted(items, key=cmp_to_key(comparer))
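A minimal usage sketch (the records are made up): ascending on one key, descending on another via the '-' prefix.

people = [{'name': 'alice', 'age': 30},
          {'name': 'bob', 'age': 25},
          {'name': 'carol', 'age': 25}]
# age ascending, then name descending:
# carol (25), bob (25), alice (30)
sorted_people = multikey_sort(people, ['age', '-name'])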
def sanitize(string):
    """Catch and replace invalid path chars; each entry is [replace, with]."""
    replace_chars = [
        ['\\', '-'], [':', '-'], ['/', '-'],
        ['?', ''], ['<', ''], ['>', ''],
        ['`', '`'], ['|', '-'], ['*', '`'],
        ['"', '\''], ['.', ''], ['&', 'and']
    ]
    for ch in replace_chars:
        string = string.replace(ch[0], ch[1])
    return string
def chunks_of(max_chunk_size, list_to_chunk):
    """Yield successive chunks of the list, each at most max_chunk_size long."""
    for i in range(0, len(list_to_chunk), max_chunk_size):
        yield list_to_chunk[i:i + max_chunk_size]
def split_into(max_num_chunks, list_to_chunk):
    """Yield the list split into at most max_num_chunks chunks."""
    max_chunk_size = math.ceil(len(list_to_chunk) / max_num_chunks)
    return chunks_of(max_chunk_size, list_to_chunk)
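For illustration, a quick sketch of how the two helpers relate:

print(list(chunks_of(3, [1, 2, 3, 4, 5, 6, 7])))   # [[1, 2, 3], [4, 5, 6], [7]]
print(list(split_into(2, [1, 2, 3, 4, 5, 6, 7])))  # [[1, 2, 3, 4], [5, 6, 7]]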
def norm_path(path):
    """:return: Proper path for os with vars expanded out."""
    # path = os.path.normcase(path)
    path = os.path.expanduser(path)
    path = os.path.expandvars(path)
    path = os.path.normpath(path)
    return path
def create_hashed_path(base_path, name, depth=2):
    """Create a directory structure using the hashed filename.

    :return: dict with the path to save to (not including filename/ext) and the hash
    """
    if depth > 16:
        logger.warning("depth cannot be greater than 16, setting to 16")
        depth = 16

    name_hash = hashlib.md5(str(name).encode('utf-8')).hexdigest()
    if base_path.endswith(os.path.sep):
        save_path = base_path
    else:
        save_path = base_path + os.path.sep

    for i in range(1, depth + 1):
        end = i * 2
        start = end - 2
        save_path += name_hash[start:end] + os.path.sep

    return {'path': save_path,
            'hash': name_hash,
            }
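A hedged sketch of the resulting layout (the base path and filename are hypothetical):

result = create_hashed_path('/data/cache', 'example.jpg')
# With depth=2, the first two byte pairs of the md5 hex digest become nested
# directories, e.g. /data/cache/ab/cd/ for a digest starting with "abcd"
save_to = result['path'] + 'example.jpg'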
def create_path(path, is_dir=False):
    """Check if path exists, if not create it.

    :param path: path or file to create directory for
    :param is_dir: pass True if we are passing in a directory, default = False
    :return: os safe path from `path`
    """
    path = norm_path(path)
    path_check = path
    if not is_dir:
        path_check = os.path.dirname(path)

    does_path_exists = os.path.exists(path_check)

    if does_path_exists:
        return path

    try:
        os.makedirs(path_check)
    except OSError:
        pass

    return path
def rate_limited(num_calls=1, every=1.0):
    """Prevent a method from being called if it was previously called before
    a time window has elapsed.

    Source: https://github.com/tomasbasham/ratelimit/tree/0ca5a616fa6d184fa180b9ad0b6fd0cf54c46936
    A few changes were needed, including having num_calls be a float.

    Keyword Arguments:
        num_calls (float): Maximum method invocations within a period. Must be greater than 0.
        every (float): A dampening factor (in seconds). Can be any number greater than 0.

    Return:
        function: Decorated function that will forward method invocations if the time window has elapsed.
    """
    frequency = abs(every) / float(num_calls)

    def decorator(func):
        """Extend the behaviour of the following function, forwarding method
        invocations if the time window has elapsed.

        Arguments:
            func (function): The function to decorate

        Returns:
            function: Decorated function
        """
        # To get around issues with function local scope and reassigning
        # variables, we wrap the time within a list. When updating the value
        # we're not reassigning `last_called`, which would not work, but
        # instead reassigning the value at a particular index.
        last_called = [0.0]

        # Add thread safety
        lock = threading.RLock()

        def wrapper(*args, **kargs):
            """Decorator wrapper function"""
            with lock:
                elapsed = time.time() - last_called[0]
                left_to_wait = frequency - elapsed
                if left_to_wait > 0:
                    time.sleep(left_to_wait)
                last_called[0] = time.time()
            return func(*args, **kargs)

        return wrapper

    return decorator
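A usage sketch; the decorated function and URL are made up:

@rate_limited(num_calls=2, every=1.0)
def fetch(url):
    print("fetching", url)

for n in range(5):
    fetch("http://example.com/{0}".format(n))  # sleeps as needed: at most ~2 calls per second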
def rate_limited_old(max_per_second):
    """Source: https://gist.github.com/gregburek/1441055"""
    lock = threading.Lock()
    min_interval = 1.0 / max_per_second

    def decorate(func):
        last_time_called = time.perf_counter()

        @wraps(func)
        def rate_limited_function(*args, **kwargs):
            lock.acquire()
            nonlocal last_time_called
            try:
                elapsed = time.perf_counter() - last_time_called
                left_to_wait = min_interval - elapsed
                if left_to_wait > 0:
                    time.sleep(left_to_wait)
                return func(*args, **kwargs)
            finally:
                last_time_called = time.perf_counter()
                lock.release()

        return rate_limited_function

    return decorate
def timeit(stat_tracker_func, name):
    """Time the decorated function and send `name` along with the elapsed
    time (in seconds) to `stat_tracker_func`, which can be used to either
    print out the data or save it."""
    def _timeit(func):
        def wrapper(*args, **kw):
            start_time = time.time()
            result = func(*args, **kw)
            stop_time = time.time()
            stat_tracker_func(name, stop_time - start_time)
            return result
        return wrapper
    return _timeit
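A usage sketch with a hypothetical tracker that just collects values:

stats = {}

@timeit(lambda name, value: stats.setdefault(name, []).append(value), 'parse_time')
def parse():
    time.sleep(0.1)

parse()
print(stats)  # roughly {'parse_time': [0.1...]}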
def get_proxy_parts(proxy):
    """Take a proxy url and break it up into its parts."""
    proxy_parts = {'schema': None,
                   'user': None,
                   'password': None,
                   'host': None,
                   'port': None,
                   }
    # Find parts
    results = re.match(proxy_parts_pattern, proxy)
    if results:
        matched = results.groupdict()
        for key in proxy_parts:
            proxy_parts[key] = matched.get(key)
    else:
        logger.error("Invalid proxy format `{proxy}`".format(proxy=proxy))

    if proxy_parts['port'] is None:
        proxy_parts['port'] = '80'

    return proxy_parts
def remove_html_tag(input_str='', tag=None):
    """Return the string with the given html tag and all its contents removed."""
    result = input_str
    if tag is not None:
        pattern = re.compile(r'<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
        result = re.sub(pattern, '', str(input_str))
    return result
def asodict(self, handlepoints=True, reportpoints=True):
    """Return an ordered dictionary of handle/report points."""
    out = odict()
    if handlepoints:
        for hp in self.handlepoints:
            out[hp.hpoint] = hp.trace
    if reportpoints:
        for rp in self.reportpoints:
            if not (rp.rpoint in out):
                out[rp.rpoint] = odict()
            out[rp.rpoint][self.attribute] = {'value': rp.value,
                                              'extended': rp.extended}
    return out
def asodict(self, freports=True, handlepoints=True, reportpoints=True):
    """Return an ordered dictionary of feed reports and handle/report points."""
    out = odict()
    if freports:
        for fr in self.freports:
            out[fr.num] = {'tstamp': fr.tstamp,
                           'report': fr.asodict(handlepoints, reportpoints)}
    if handlepoints:
        for hp in self.handlepoints:
            out[hp.hpoint] = hp.trace
    if reportpoints:
        for rp in self.reportpoints:
            if not (rp.rpoint in out):
                out[rp.rpoint] = odict()
            out[rp.rpoint][self.attribute] = {'value': rp.value,
                                              'extended': rp.extended}
    return out
def ip_between(ip, start, finish):
    """Checks to see if IP is between start and finish."""
    if is_IPv4Address(ip) and is_IPv4Address(start) and is_IPv4Address(finish):
        return IPAddress(ip) in IPRange(start, finish)
    else:
        return False
def is_rfc1918(ip):
    """Checks to see if an IP address is used for local communications within
    a private network, as specified by RFC 1918."""
    if ip_between(ip, "10.0.0.0", "10.255.255.255"):
        return True
    elif ip_between(ip, "172.16.0.0", "172.31.255.255"):
        return True
    elif ip_between(ip, "192.168.0.0", "192.168.255.255"):
        return True
    else:
        return False
def is_reserved(ip):
    """Checks to see if an IP address is reserved for special purposes.
    This includes all of the RFC 1918 addresses as well as other blocks that
    are reserved by IETF and IANA for various reasons.

    https://en.wikipedia.org/wiki/Reserved_IP_addresses
    """
    if ip_between(ip, "0.0.0.0", "0.255.255.255"):
        return True
    elif ip_between(ip, "10.0.0.0", "10.255.255.255"):
        return True
    elif ip_between(ip, "100.64.0.0", "100.127.255.255"):
        return True
    elif ip_between(ip, "127.0.0.0", "127.255.255.255"):
        return True
    elif ip_between(ip, "169.254.0.0", "169.254.255.255"):
        return True
    elif ip_between(ip, "172.16.0.0", "172.31.255.255"):
        return True
    elif ip_between(ip, "192.0.0.0", "192.0.0.255"):
        return True
    elif ip_between(ip, "192.0.2.0", "192.0.2.255"):
        return True
    elif ip_between(ip, "192.88.99.0", "192.88.99.255"):
        return True
    elif ip_between(ip, "192.168.0.0", "192.168.255.255"):
        return True
    elif ip_between(ip, "198.18.0.0", "198.19.255.255"):
        return True
    elif ip_between(ip, "198.51.100.0", "198.51.100.255"):
        return True
    elif ip_between(ip, "203.0.113.0", "203.0.113.255"):
        return True
    elif ip_between(ip, "224.0.0.0", "255.255.255.255"):
        return True
    else:
        return False
def is_hash(fhash):
    """Returns True for valid hashes, False for invalid."""
    # Intentionally doing if/else statements for ease of testing and reading
    if re.match(re_md5, fhash):
        return True
    elif re.match(re_sha1, fhash):
        return True
    elif re.match(re_sha256, fhash):
        return True
    elif re.match(re_sha512, fhash):
        return True
    elif re.match(re_ssdeep, fhash):
        return True
    else:
        return False
def ip_to_geojson(ipaddress, name="Point"):
    """Generate GeoJSON for the given IP address."""
    geo = ip_to_geo(ipaddress)

    point = {
        "type": "FeatureCollection",
        "features": [
            {
                "type": "Feature",
                "properties": {
                    "name": name
                },
                "geometry": {
                    "type": "Point",
                    "coordinates": [
                        geo["longitude"],
                        geo["latitude"]
                    ]
                }
            }
        ]
    }

    return point
def ips_to_geojson(ipaddresses):
    """Generate GeoJSON for the given list of IP addresses."""
    features = []

    for ipaddress in ipaddresses:
        geo = gi.record_by_addr(ipaddress)
        features.append({
            "type": "Feature",
            "properties": {
                "name": ipaddress
            },
            "geometry": {
                "type": "Point",
                "coordinates": [
                    geo["longitude"],
                    geo["latitude"]
                ]
            }
        })

    points = {
        "type": "FeatureCollection",
        "features": features
    }

    return points
def reverse_dns_sna(ipaddress):
    """Returns a list of the dns names that point to a given ipaddress using the StatDNS API."""
    r = requests.get("http://api.statdns.com/x/%s" % ipaddress)

    if r.status_code == 200:
        names = []
        for item in r.json()['answer']:
            name = str(item['rdata']).strip(".")
            names.append(name)
        return names
    elif r.json()['code'] == 503:
        # NXDOMAIN - no PTR record
        return None
def vt_ip_check(ip, vt_api):
    """Checks VirusTotal for occurrences of an IP address."""
    if not is_IPv4Address(ip):
        return None

    url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'
    parameters = {'ip': ip, 'apikey': vt_api}
    response = requests.get(url, params=parameters)
    try:
        return response.json()
    except ValueError:
        return None
def vt_name_check(domain, vt_api):
    """Checks VirusTotal for occurrences of a domain name."""
    if not is_fqdn(domain):
        return None

    url = 'https://www.virustotal.com/vtapi/v2/domain/report'
    parameters = {'domain': domain, 'apikey': vt_api}
    response = requests.get(url, params=parameters)
    try:
        return response.json()
    except ValueError:
        return None
def vt_hash_check(fhash, vt_api):
    """Checks VirusTotal for occurrences of a file hash."""
    if not is_hash(fhash):
        return None

    url = 'https://www.virustotal.com/vtapi/v2/file/report'
    parameters = {'resource': fhash, 'apikey': vt_api}
    response = requests.get(url, params=parameters)
    try:
        return response.json()
    except ValueError:
        return None
def ipinfo_ip_check(ip):
    """Checks ipinfo.io for basic WHOIS-type data on an IP address."""
    if not is_IPv4Address(ip):
        return None

    response = requests.get('http://ipinfo.io/%s/json' % ip)
    return response.json()
def ipvoid_check(ip):
    """Checks IPVoid.com for info on an IP address."""
    if not is_IPv4Address(ip):
        return None

    return_dict = {}
    headers = {'User-Agent': useragent}
    url = 'http://ipvoid.com/scan/%s/' % ip
    response = requests.get(url, headers=headers)
    data = BeautifulSoup(response.text)
    if data.findAll('span', attrs={'class': 'label label-success'}):
        return None
    elif data.findAll('span', attrs={'class': 'label label-danger'}):
        for each in data.findAll('img', alt='Alert'):
            detect_site = each.parent.parent.td.text.lstrip()
            detect_url = each.parent.a['href']
            return_dict[detect_site] = detect_url

    return return_dict
def urlvoid_check(name, api_key):
    """Checks URLVoid.com for info on a domain."""
    if not is_fqdn(name):
        return None

    url = 'http://api.urlvoid.com/api1000/{key}/host/{name}'.format(key=api_key, name=name)
    response = requests.get(url)
    tree = ET.fromstring(response.text)
    if tree.find('./detections/engines'):
        return [e.text for e in tree.find('./detections/engines')]
    else:
        return None
def urlvoid_ip_check(ip):
    """Checks URLVoid.com for info on an IP address."""
    if not is_IPv4Address(ip):
        return None

    return_dict = {}
    headers = {'User-Agent': useragent}
    url = 'http://urlvoid.com/ip/%s/' % ip
    response = requests.get(url, headers=headers)
    data = BeautifulSoup(response.text)
    h1 = data.findAll('h1')[0].text
    if h1 == 'Report not found':
        return None
    elif re.match('^IP', h1):
        return_dict['bad_names'] = []
        return_dict['other_names'] = []
        for each in data.findAll('img', alt='Alert'):
            return_dict['bad_names'].append(each.parent.text.strip())
        for each in data.findAll('img', alt='Valid'):
            return_dict['other_names'].append(each.parent.text.strip())

    return return_dict
def dshield_ip_check(ip):
    """Checks DShield for info on an IP address."""
    if not is_IPv4Address(ip):
        return None

    headers = {'User-Agent': useragent}
    url = 'https://isc.sans.edu/api/ip/'
    response = requests.get('{0}{1}?json'.format(url, ip), headers=headers)
    return response.json()
def validate(version, comparison):
    """Returns whether or not the version for this plugin satisfies the
    inputted expression. The expression will follow the dependency
    declaration rules associated with setuptools in Python. More information
    can be found at
    [https://pythonhosted.org/setuptools/setuptools.html#declaring-dependencies]

    :param version | <str>
           expression | <str>

    :return <bool>
    """
    # match any
    if not comparison:
        return True

    # loop through all available
    opts = comparison.split(',')
    expr = re.compile('(==|!=|<=|>=|<|>)(.*)')
    for opt in opts:
        try:
            test, value = expr.match(opt.strip()).groups()
        except StandardError:
            raise errors.InvalidVersionDefinition(opt)

        value = value.strip()

        # test for an exact match
        if test == '==':
            if value == version:
                return True

        # test for negative exact matches
        elif test == '!=':
            if value == version:
                return False

        # test for range conditions
        elif test == '<':
            if vercmp(version, value) != -1:
                return False
        elif test == '<=':
            if vercmp(version, value) not in (-1, 0):
                return False
        elif test == '>':
            if vercmp(value, version) != -1:
                return False
        elif test == '>=':
            if vercmp(value, version) not in (-1, 0):
                return False

    return True
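Assuming vercmp follows the usual -1/0/1 comparison convention, the expression grammar behaves like this:

print(validate('1.2.0', ''))            # True: empty comparison matches any version
print(validate('1.2.0', '>=1.0,<2.0'))  # True: both range conditions hold
print(validate('1.2.0', '!=1.2.0'))     # False: negative exact match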
def _full_kind(details):
    """Determine the full kind (including a group if applicable) for some failure details.

    :see: ``v1.Status.details``
    """
    kind = details[u"kind"]
    if details.get(u"group") is not None:
        kind += u"." + details[u"group"]
    return kind
def RemoveEmptyDirectoryTree(path, silent=False, recursion=0):
    """Delete tree of empty directories.

    Parameters
    ----------
    path : string
        Path to root of directory tree.

    silent : boolean [optional: default = False]
        Turn off log output.

    recursion : int [optional: default = 0]
        Indicates level of recursion.
    """
    if not silent and recursion == 0:
        goodlogging.Log.Info("UTIL", "Starting removal of empty directory tree at: {0}".format(path))
    try:
        os.rmdir(path)
    except OSError:
        if not silent:
            goodlogging.Log.Info("UTIL", "Removal of empty directory tree terminated at: {0}".format(path))
        return
    else:
        if not silent:
            goodlogging.Log.Info("UTIL", "Directory deleted: {0}".format(path))
        RemoveEmptyDirectoryTree(os.path.dirname(path), silent, recursion + 1)
def CheckPathExists(path):
    """Check if path exists; if it does, add a number to the path
    (incrementing until a unique path is found).

    Parameters
    ----------
    path : string
        Path of directory to try.

    Returns
    ----------
    string
        Path of unique directory.
    """
    i = 0
    root, ext = os.path.splitext(path)
    while os.path.exists(path):
        i = i + 1
        goodlogging.Log.Info("UTIL", "Path {0} already exists".format(path))
        path = "{0}_{1}".format(root, i) + ext
    return path
def StripSpecialCharacters(string, stripAll=False):
    """Strip special characters, duplicate spaces and leading/trailing
    spaces. Stripping of single spaces, periods, hyphens and underscores is
    conditional on stripAll being set.

    Parameters
    ----------
    string : string
        String to strip special characters from.

    stripAll : boolean [optional: default = False]
        If set will also strip single spaces, periods, hyphens and underscores.

    Returns
    ----------
    string
        Resulting string with special characters removed.
    """
    goodlogging.Log.Info("UTIL", "Stripping any special characters from {0}".format(string), verbosity=goodlogging.Verbosity.MINIMAL)
    string = string.strip()
    string = re.sub('[&]', 'and', string)
    string = re.sub(r'[@#$%^&*{};:,/<>?\\|`~=+±§£]', '', string)
    string = re.sub(r'\s\s+', ' ', string)
    if stripAll:
        string = re.sub('[_.-]', '', string)
        string = re.sub(r'\s', '', string)
    goodlogging.Log.Info("UTIL", "New string is: {0}".format(string), verbosity=goodlogging.Verbosity.MINIMAL)
    return string
def ValidUserResponse(response, validList):
    """Check if a user response is in a list of valid entries. If an invalid
    response is given, re-prompt the user to enter one of the valid options.
    Do not proceed until a valid entry is given.

    Parameters
    ----------
    response : string
        Response string to check.

    validList : list
        A list of valid responses.

    Returns
    ----------
    string
        A valid response string.
    """
    if response in validList:
        return response
    else:
        prompt = "Unknown response given - please reenter one of [{0}]: ".format('/'.join(validList))
        response = goodlogging.Log.Input("DM", prompt)
        return ValidUserResponse(response, validList)
def GetBestMatch(target, matchList):
    """Find the elements of matchList which best match the target string.

    Note that this searches substrings, so "abc" will have a 100% match in
    "this is the abc", "abcde" and "abc".

    Parameters
    ----------
    target : string
        Target string to match.

    matchList : list
        List of strings to match target against.

    Returns
    ----------
    list
        A list of potential matches which share the same highest match score.
        If an exact match is found (1.0 score and equal size string) it is
        given alone.
    """
    bestMatchList = []

    if len(matchList) > 0:
        ratioMatch = []
        for item in matchList:
            ratioMatch.append(GetBestStringMatchValue(target, item))

        maxRatio = max(ratioMatch)
        if maxRatio > 0.8:
            matchIndexList = [i for i, j in enumerate(ratioMatch) if j == maxRatio]

            for index in matchIndexList:
                if maxRatio == 1 and len(matchList[index]) == len(target):
                    return [matchList[index], ]
                else:
                    bestMatchList.append(matchList[index])

    return bestMatchList
def GetBestStringMatchValue(string1, string2):
    """Return the value of the highest matching substring between two strings.

    Parameters
    ----------
    string1 : string
        First string.

    string2 : string
        Second string.

    Returns
    ----------
    float
        Value representing the best match found between string1 and string2.
    """
    # Ignore case
    string1 = string1.lower()
    string2 = string2.lower()

    # Ignore non-alphanumeric characters
    string1 = ''.join(i for i in string1 if i.isalnum())
    string2 = ''.join(i for i in string2 if i.isalnum())

    # Find best match value between string1 and string2
    if len(string1) == 0 or len(string2) == 0:
        bestRatio = 0
    elif len(string1) == len(string2):
        match = difflib.SequenceMatcher(None, string1, string2)
        bestRatio = match.ratio()
    else:
        if len(string1) > len(string2):
            shortString = string2
            longString = string1
        else:
            shortString = string1
            longString = string2

        match = difflib.SequenceMatcher(None, shortString, longString)
        bestRatio = match.ratio()

        for block in match.get_matching_blocks():
            subString = longString[block[1]:block[1] + block[2]]
            subMatch = difflib.SequenceMatcher(None, shortString, subString)
            if subMatch.ratio() > bestRatio:
                bestRatio = subMatch.ratio()

    return bestRatio
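Two quick examples of the substring-aware scoring:

print(GetBestStringMatchValue('abc', 'this is the abc'))  # 1.0: exact substring found
print(GetBestStringMatchValue('abc', 'xyz'))              # 0.0: nothing in common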
def WebLookup(url, urlQuery=None, utf8=True):
    """Look up the webpage at the given url with an optional query string.

    Parameters
    ----------
    url : string
        Web url.

    urlQuery : dictionary [optional: default = None]
        Parameters to be passed to the GET method of the requests module.

    utf8 : boolean [optional: default = True]
        Set response encoding.

    Returns
    ----------
    string
        GET response text
    """
    goodlogging.Log.Info("UTIL", "Looking up info from URL: {0} with QUERY: {1}".format(url, urlQuery), verbosity=goodlogging.Verbosity.MINIMAL)
    response = requests.get(url, params=urlQuery)
    goodlogging.Log.Info("UTIL", "Full url: {0}".format(response.url), verbosity=goodlogging.Verbosity.MINIMAL)

    if utf8 is True:
        response.encoding = 'utf-8'

    if response.status_code == requests.codes.ok:
        return response.text
    else:
        response.raise_for_status()
def ArchiveProcessedFile(filePath, archiveDir):
    """Move file from given file path to archive directory. Note the archive
    directory is relative to the file path directory.

    Parameters
    ----------
    filePath : string
        File path

    archiveDir : string
        Name of archive directory
    """
    targetDir = os.path.join(os.path.dirname(filePath), archiveDir)
    goodlogging.Log.Info("UTIL", "Moving file to archive directory:")
    goodlogging.Log.IncreaseIndent()
    goodlogging.Log.Info("UTIL", "FROM: {0}".format(filePath))
    goodlogging.Log.Info("UTIL", "TO:   {0}".format(os.path.join(targetDir, os.path.basename(filePath))))
    goodlogging.Log.DecreaseIndent()
    os.makedirs(targetDir, exist_ok=True)
    try:
        shutil.move(filePath, targetDir)
    except shutil.Error as ex4:
        err = ex4.args[0]
        goodlogging.Log.Info("UTIL", "Move to archive directory failed - Shutil Error: {0}".format(err))
def send(self, text):
    """Send a string to the PiLite; can be simple text or a $$$ command."""
    # print text
    self.s.write(text)
    time.sleep(0.001 * len(text))
def send_wait(self, text):
    """Send a string to the PiLite and sleep until the message has been
    displayed (based on an estimate of the speed of the display). Because
    the font is not monospaced, this will wait too long in most cases."""
    self.send(text)
    time.sleep(len(text) * PiLite.COLS_PER_CHAR * self.speed / 1000.0)
def set_speed(self, speed):
    """Set the display speed. The parameter is the number of milliseconds
    between each column scrolling off the display."""
    self.speed = speed
    self.send_cmd("SPEED" + str(speed))
def set_fb_pic(self, pattern):
    """Set the "frame buffer". This allows a "nice" string to be sent: it
    first removes all whitespace, then transposes so that the X and Y axes
    are swapped, so what is seen in the file matches what will be seen on
    the screen. Also '.' and '*' can be used in place of 0 and 1."""
    pattern = ''.join(pattern.split())  # Remove whitespace
    pattern = pattern.replace('*', '1')
    pattern = pattern.replace('.', '0')
    fb = ''
    for x in range(14):
        for y in range(9):
            fb += pattern[y * 14 + x]
    self.set_fb(fb)
def set_fb_random(self):
    """Set the "frame buffer" to a random pattern."""
    pattern = ''.join([random.choice(['0', '1']) for i in xrange(14 * 9)])
    self.set_fb(pattern)
def set_pixel(self, x, y, state):
    """Set pixel at "x,y" to "state", where state can be one of "ON", "OFF"
    or "TOGGLE"."""
    self.send_cmd("P" + str(x + 1) + "," + str(y + 1) + "," + state)
def display_char(self, x, y, char):
    """Display character "char" with its top left at "x,y"."""
    self.send_cmd("T" + str(x + 1) + "," + str(y + 1) + "," + char)
def cli(ctx, amount, index, stage):
    """Pull, Transform, Push: streaming inside a pipe (experimental)."""
    ctx.obj.say_green('Starting Streaming Pipe')
    res_pull = ctx.invoke(pull, amount=amount, index=index, stage=stage)
    res_tra = False
    res_push = False
    if res_pull:
        # amount to transform can be less (or more)
        res_tra = ctx.invoke(transform, amount=amount, index=index, stage=stage)
    if res_tra:
        # amount to push can be less (or more)
        res_push = ctx.invoke(push, amount=amount, index=index, stage=stage)
    if res_pull and res_tra and res_push:
        ctx.obj.say_green('Streaming Pipe finished')
        return True
    return False
def camelHump(text):
    """Converts the inputted text to camel humps by joining all capital
    letters together (The Quick, Brown, Fox.Tail -> theQuickBrownFoxTail).

    :param: text <str> text to be changed

    :return: <str>

    :usage: |import projex.text
            |print projex.text.camelHump('The,Quick, Brown, Fox.Tail')
    """
    # capitalize each word, then lowercase the leading letter
    output = ''.join([word[0].upper() + word[1:] for word in words(text)])
    if output:
        output = output[0].lower() + output[1:]
    return output
def capitalize(text):
    """Capitalizes the word using the normal string capitalization method;
    however, if the word contains only capital letters and numbers, then it
    will not be affected.

    :param text | <str>

    :return <str>
    """
    text = nativestring(text)
    if EXPR_CAPITALS.match(text):
        return text
    return text.capitalize()
def classname(text):
    """Converts the inputted text to the standard classname format (camel
    humped with a capital letter to start).

    :return <str>
    """
    if not text:
        return text
    text = camelHump(text)
    return text[0].upper() + text[1:]
def encoded(text, encoding=DEFAULT_ENCODING):
    """Encodes the inputted unicode/string variable with the given encoding type.

    :param text | <variant>
           encoding | <str>

    :return <str>
    """
    # already a string item
    if type(text) == bytes_type:
        return text

    elif type(text) != unicode_type:
        # convert a QString value
        if type(text).__name__ == 'QString':
            if encoding == 'utf-8':
                return unicode_type(text.toUtf8(), 'utf-8')
            elif encoding == 'latin-1':
                return unicode_type(text.toLatin1(), 'latin-1')
            elif encoding == 'ascii':
                return unicode_type(text.toAscii(), 'ascii')
            else:
                return unicode_type(text, encoding)

        # convert a standard item
        else:
            try:
                return bytes_type(text)
            except StandardError:
                return '????'

    if encoding:
        try:
            return text.encode(encoding)
        except StandardError:
            return text.encode(encoding, errors='ignore')
    else:
        for enc in SUPPORTED_ENCODINGS:
            try:
                return text.encode(enc)
            except StandardError:
                pass

    return '????'
def decoded(text, encoding=DEFAULT_ENCODING):
    """Attempts to decode the inputted unicode/string variable using the
    given encoding type. If no encoding is provided, then it will attempt to
    use one of the ones available from the default list.

    :param text | <variant>
           encoding | <str> || None

    :return <unicode>
    """
    # unicode has already been decoded
    if type(text) == unicode_type:
        return text

    elif type(text) != bytes_type:
        try:
            return unicode_type(text)
        except StandardError:
            try:
                text = bytes_type(text)
            except StandardError:
                msg = u'<< projex.text.decoded: unable to decode ({0})>>'
                return msg.format(repr(text))

    if encoding:
        try:
            return text.decode(encoding)
        except StandardError:
            pass

    for enc in SUPPORTED_ENCODINGS:
        try:
            return text.decode(enc)
        except StandardError:
            pass

    return u'????'
def nativestring(val, encodings=None):
    """Converts the inputted value to a native python string-type format.

    :param val | <variant>
           encodings | (<str>, ..) || None

    :sa decoded

    :return <unicode> || <str>
    """
    # if it is already a native python string, don't do anything
    if type(val) in (bytes_type, unicode_type):
        return val

    # otherwise, attempt to return a decoded value
    try:
        return unicode_type(val)
    except StandardError:
        pass

    try:
        return bytes_type(val)
    except StandardError:
        return decoded(val)
def joinWords(text, separator=''):
    """Collects all the words from a text and joins them together with the
    inputted separator.

    :sa [[#words]]

    :param text <str>
    :param separator <str>

    :return <str>

    :usage |import projex
           |print projex.joinWords('This::is.a testTest','-')
    """
    text = nativestring(text)
    output = separator.join(words(text.strip(separator)))

    # no need to check for bookended items when it's an empty string
    if not separator:
        return output

    # look for beginning characters
    begin = re.match(r'^\%s+' % separator, text)
    if begin:
        output = begin.group() + output

        # make sure to not double up
        if begin.group() == text:
            return output

    # otherwise, look for the ending results
    end = re.search(r'\%s+$' % separator, text)
    if end:
        output += end.group()

    return output
def pluralize(word, count=None, format=u'{word}'):
    """Converts the inputted word to the plural form of it. This method works
    best if you use the inflect module, as it will just pass along the
    request to inflect.plural. If you do not have that module, then a simpler
    and less impressive pluralization technique will be used.

    :sa https://pypi.python.org/pypi/inflect

    :param word | <str>

    :return <str>
    """
    if count == 1:
        return word
    elif count is not None:
        return format.format(word=word, count=count)

    word = nativestring(word)
    if inflect_engine:
        return format.format(word=inflect_engine.plural(word))

    all_upper = EXPR_UPPERCASE.match(word) is not None

    # go through the different plural expressions, searching for the
    # proper replacement
    for expr, plural in PLURAL_RULES:
        results = expr.match(word)
        if results:
            result_dict = results.groupdict()
            single = result_dict.get('single', '')

            # check if it's capitalized
            if all_upper:
                return format.format(word=single + plural.upper())
            else:
                return format.format(word=single + plural)

    # by default, just include 's' at the end
    if all_upper:
        return format.format(word=word + 'S')
    return format.format(word=word + 's')
def safe_eval(value):
    """Converts the inputted text value to a standard python value (if possible).

    :param value | <str> || <unicode>

    :return <variant>
    """
    if not isinstance(value, (str, unicode)):
        return value
    try:
        return CONSTANT_EVALS[value]
    except KeyError:
        try:
            return ast.literal_eval(value)
        except StandardError:
            return value
def sectioned(text, sections=1):
    """Splits the inputted text up into sections.

    :param text | <str>
           sections | <int>

    :return <str>
    """
    text = nativestring(text)
    if not text:
        return ''

    count = len(text) / max(1, sections)
    return ' '.join([text[i:i + count] for i in range(0, len(text), count)])
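For example (note the integer division, as this module targets Python 2):

print(sectioned('1234567890', 2))  # '12345 67890'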
def singularize(word):
    """Converts the inputted word to the singular form of it. This method
    works best if you use the inflect module, as it will just pass along the
    request to inflect.singular_noun. If you do not have that module, then a
    simpler and less impressive singularization technique will be used.

    :sa https://pypi.python.org/pypi/inflect

    :param word <str>

    :return <str>
    """
    word = toUtf8(word)
    if inflect_engine:
        result = inflect_engine.singular_noun(word)
        if result is False:
            return word
        return result

    # go through the different plural expressions, searching for the
    # proper replacement
    if word.endswith('ies'):
        return word[:-3] + 'y'
    elif word.endswith('IES'):
        return word[:-3] + 'Y'
    elif word.endswith('s') or word.endswith('S'):
        return word[:-1]

    return word
def stemmed(text):
    """Returns a list of simplified and stemmed down terms for the inputted
    text. This will remove common terms and words from the search and return
    only the important root terms. This is useful in searching algorithms.

    :param text | <str>

    :return [<str>, ..]
    """
    terms = re.split(r'\s+', toAscii(text))

    output = []
    for term in terms:
        # ignore possessive apostrophes
        if term.endswith("'s"):
            stripped_term = term[:-2]
        else:
            stripped_term = term

        single_term = singularize(stripped_term)

        if term in COMMON_TERMS or stripped_term in COMMON_TERMS or single_term in COMMON_TERMS:
            continue

        output.append(single_term)

    return output
def stripHtml(html, joiner=''):
    """Strips out the HTML tags from the inputted text, returning the basic
    text. This algorithm was found on
    [http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python StackOverflow].

    :param html | <str>

    :return <str>
    """
    stripper = HTMLStripper()
    stripper.feed(html.replace('<br>', '\n').replace('<br/>', '\n'))
    return stripper.text(joiner)
def truncate(text, length=50, ellipsis='...'):
    """Returns a truncated version of the inputted text.

    :param text | <str>
           length | <int>
           ellipsis | <str>

    :return <str>
    """
    text = nativestring(text)
    return text[:length] + (text[length:] and ellipsis)
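Two quick examples; the `text[length:] and ellipsis` idiom appends the ellipsis only when something was actually cut off:

print(truncate('The quick brown fox jumps over the lazy dog', 10))  # 'The quick ...'
print(truncate('short', 10))                                        # 'short'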
def toBytes(text, encoding=DEFAULT_ENCODING):
    """Converts the inputted text to a base string bytes array.

    :param text | <variant>

    :return <str> || <bytes> (python3)
    """
    if not text:
        return text
    if not isinstance(text, bytes_type):
        text = text.encode(encoding)
    return text
def toUnicode(data, encoding=DEFAULT_ENCODING):
    """Converts the inputted data to unicode format.

    :param data | <str> || <unicode> || <iterable>

    :return <unicode> || <iterable>
    """
    if isinstance(data, unicode_type):
        return data
    if isinstance(data, bytes_type):
        return unicode_type(data, encoding=encoding)
    if hasattr(data, '__iter__'):
        try:
            dict(data)
        except TypeError:
            pass
        except ValueError:
            return (toUnicode(i, encoding) for i in data)
        else:
            if hasattr(data, 'items'):
                data = data.items()
            return dict(((toUnicode(k, encoding), toUnicode(v, encoding))
                         for k, v in data))
    return data
def underscore(text, lower=True):
    """Splits all the words from the inputted text so that they are separated
    by underscores.

    :sa [[#joinWords]]

    :param text <str>

    :return <str>

    :usage |import projex.text
           |print projex.text.underscore('TheQuick, Brown, Fox')
    """
    out = joinWords(text, '_')
    if lower:
        return out.lower()
    return out
def xmlindent(elem, level=0, spacer=' '):
    """Indents the inputted XML element based on the given indent level.

    :param elem | <xml.etree.Element>
    """
    i = "\n" + level * spacer
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + spacer
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            xmlindent(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def words(text):
    """Extracts a list of words from the inputted text, parsing out
    non-alphanumeric characters and splitting camel humps to build the list
    of words.

    :param text <str>

    :return [<str>, ..]

    :usage |import projex.text
           |print projex.text.words('TheQuick, TheBrown Fox.Tail')
    """
    stext = nativestring(text)
    if not stext:
        return []

    # first, split all the alphanumeric characters up
    phrases = EXPR_PHRASE.findall(stext)

    # second, split all the camel humped words
    output = []
    for phrase in phrases:
        output += EXPR_WORD.findall(phrase)

    return output
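Mirroring the docstring's usage, a hedged expectation of the output (the exact split depends on the module's EXPR_PHRASE/EXPR_WORD patterns):

print(words('TheQuick, TheBrown Fox.Tail'))
# roughly: ['The', 'Quick', 'The', 'Brown', 'Fox', 'Tail']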
def to_json(data):
    """Return data as a JSON string."""
    return json.dumps(data, default=lambda x: x.__dict__, sort_keys=True, indent=4)
def convert_string(string, chars=None):
    """Replace certain characters in a string with spaces."""
    if chars is None:
        chars = [',', '.', '-', '/', ':', ' ']
    for ch in chars:
        if ch in string:
            string = string.replace(ch, ' ')
    return string
def convert_time(time):
    """Convert a time string into 24-hour time."""
    split_time = time.split()
    try:
        # Get rid of period in a.m./p.m.
        am_pm = split_time[1].replace('.', '')
        time_str = '{0} {1}'.format(split_time[0], am_pm)
    except IndexError:
        return time
    try:
        time_obj = datetime.strptime(time_str, '%I:%M %p')
    except ValueError:
        time_obj = datetime.strptime(time_str, '%I %p')
    return time_obj.strftime('%H:%M %p')
def convert_month(date, shorten=True, cable=True):
    """Replace month by shortening or lengthening it.

    :param shorten: Set to True to shorten month name.
    :param cable: Set to True if category is Cable.
    """
    month = date.split()[0].lower()
    if 'sept' in month:
        shorten = False if cable else True
    try:
        if shorten:
            month = SHORT_MONTHS[MONTHS.index(month)]
        else:
            month = MONTHS[SHORT_MONTHS.index(month)]
    except ValueError:
        month = month.title()
    return '{0} {1}'.format(month, ' '.join(date.split()[1:]))
def convert_date(date):
    """Convert a date string to a datetime object."""
    date = convert_month(date, shorten=False)
    clean_string = convert_string(date)
    return datetime.strptime(clean_string, DATE_FMT.replace('-', ''))
def date_in_range(date1, date2, range):
    """Check whether two dates are within `range` days of each other."""
    date_obj1 = convert_date(date1)
    date_obj2 = convert_date(date2)
    return (date_obj2 - date_obj1).days <= range
def inc_date(date_obj, num, date_fmt):
    """Increment the date by a certain number of days and return it as a
    string in the specified format."""
    return (date_obj + timedelta(days=num)).strftime(date_fmt)
def get_soup(url):
    """Request the page and return the soup."""
    html = requests.get(url, stream=True, headers=HEADERS)
    if html.status_code != 404:
        return BeautifulSoup(html.content, 'html.parser')
    else:
        return None
def match_list(query_list, string):
    """Return True if all words in a word list are in the string.

    :param query_list: list of words to match
    :param string: the word or words to be matched against
    """
    match = False
    index = 0
    # Get rid of stop words like 'the' to ease string matching
    string = ' '.join(filter_stopwords(string))
    if not isinstance(query_list, list):
        query_list = [query_list]
    while index < len(query_list):
        query = query_list[index]
        words_query = filter_stopwords(query)
        match = all(word in string for word in words_query)
        if match:
            break
        index += 1
    return match
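Two quick examples (stop words are filtered out and matching ignores case):

print(match_list(['quick fox'], 'The Quick Brown Fox'))  # True
print(match_list('lazy dog', 'The Quick Brown Fox'))     # False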
def filter_stopwords(phrase):
    """Filter out stop words and return the result as a list of words."""
    if not isinstance(phrase, list):
        phrase = phrase.split()
    stopwords = ['the', 'a', 'in', 'to']
    return [word.lower() for word in phrase if word.lower() not in stopwords]
def safe_unicode(string):
    """If Python 2, replace non-ascii characters and return a UTF-8 encoded string."""
    if not PY3:
        uni = string.replace(u'\u2019', "'")
        return uni.encode('utf-8')
    return string
def get_strings(soup, tag):
    """Get all the string children from an html tag."""
    tags = soup.find_all(tag)
    strings = [s.string for s in tags if s.string]
    return strings
def _bld_op(self, op, num, **kwargs):
    """Implement a pandas operator."""
    kwargs['other'] = num
    setattr(self, op, {'mtype': pab, 'kwargs': kwargs})
def _bld_pab_generic(self, funcname, **kwargs):
    """Implement a generic version of an attribute-based pandas function."""
    margs = {'mtype': pab, 'kwargs': kwargs}
    setattr(self, funcname, margs)
def _bld_pnab_generic(self, funcname, **kwargs):
    """Implement a generic version of a non-attribute-based pandas function."""
    margs = {'mtype': pnab, 'kwargs': kwargs}
    setattr(self, funcname, margs)
def get(self, request, *args, **kwargs):
    """List all products in the shopping cart."""
    cart = ShoppingCartProxy(request)
    return JsonResponse(cart.get_products(onlypublic=request.GET.get('onlypublic', True)))
def post(self, request, *args, **kwargs):
    """Add a new product to the current shopping cart."""
    POST = json.loads(request.body.decode('utf-8'))
    if 'product_pk' in POST and 'quantity' in POST:
        cart = ShoppingCartProxy(request)
        cart.add(
            product_pk=int(POST['product_pk']),
            quantity=int(POST['quantity'])
        )
        return JsonResponse(cart.products)
    return HttpResponseBadRequest()
def dispatch(self, *args, **kwargs):
    self.__line_pk = kwargs.get('pk', None)
    if SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk).exists():
        self.form_class = LineBasketFormPack
        self.__is_pack = True
    else:
        self.__is_pack = False
    return super(LinesUpdateModalBasket, self).dispatch(*args, **kwargs)
def get_form(self, form_class=None):
    # form_kwargs = super(LineBasketUpdateModal, self).get_form_kwargs(*args, **kwargs)
    form = super(LinesUpdateModalBasket, self).get_form(form_class)
    initial = form.initial
    initial['type_tax'] = self.object.product_final.product.tax.pk
    initial['tax'] = self.object.tax_basket
    initial['price'] = float(self.object.price_base_basket) * (1 + (self.object.tax_basket / 100))
    if self.__is_pack:
        options = []
        lang = get_language_database()
        for option in SalesLineBasketOption.objects.filter(line_budget__pk=self.__line_pk):
            initial['packs[{}]'.format(option.product_option.pk)] = option.product_final.pk
            a = {
                'id': option.product_option.pk,
                'label': getattr(option.product_option, lang).name,
                'products': list(option.product_option.products_pack.all().values('pk').annotate(name=F('{}__name'.format(lang)))),
                'selected': option.product_final.pk,
            }
            options.append(a)
        # compatibility with GenForeignKey
        initial['packs'] = json.dumps({'__JSON_DATA__': options})
    return form
def register_signal(alias: str, signal: pyqtSignal):
    """Register a signal at the dispatcher. Note that you can not use an
    alias that already exists.

    :param alias: Alias of the signal. String.
    :param signal: Signal itself. Usually a pyqtSignal instance.
    :return:
    """
    if SignalDispatcher.signal_alias_exists(alias):
        raise SignalDispatcherError('Alias "' + alias + '" for signal already exists!')
    SignalDispatcher.signals[alias] = signal
def register_handler(alias: str, handler: callable):
    """Register a handler at the dispatcher.

    :param alias: Signal alias to match the handler to.
    :param handler: Handler. Some callable.
    :return:
    """
    if SignalDispatcher.handlers.get(alias) is None:
        SignalDispatcher.handlers[alias] = [handler]
    else:
        SignalDispatcher.handlers.get(alias).append(handler)
def dispatch():
    """This method runs the wheel: it connects signals with their handlers,
    based on the aliases.

    :return:
    """
    aliases = SignalDispatcher.signals.keys()
    for alias in aliases:
        handlers = SignalDispatcher.handlers.get(alias)
        signal = SignalDispatcher.signals.get(alias)
        if signal is None or not handlers:
            continue
        for handler in handlers:
            signal.connect(handler)
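A usage sketch; the widget, signal and handler names are hypothetical:

SignalDispatcher.register_signal('data_ready', some_widget.data_ready)
SignalDispatcher.register_handler('data_ready', on_data_ready)
SignalDispatcher.dispatch()  # connects some_widget.data_ready -> on_data_ready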
def signal_alias_exists(alias: str) -> bool:
    """Check if a signal alias exists.

    :param alias: Signal alias.
    :return:
    """
    if SignalDispatcher.signals.get(alias):
        return True
    return False
def handler_alias_exists(alias: str) -> bool:
    """Check if a handler alias exists.

    :param alias: Handler alias.
    :return:
    """
    if SignalDispatcher.handlers.get(alias):
        return True
    return False
def get_function_data(minion, jid):
    """AJAX access for loading function/job details."""
    redis = Redis(connection_pool=redis_pool)
    data = redis.get('{0}:{1}'.format(minion, jid))
    return Response(response=data, status=200, mimetype="application/json")
def get_api_publisher(self, social_user):
    """Return a publisher that posts to the user's wall.

    See https://vk.com/dev.php?method=wall.post and other methods at
    https://vk.com/dev.php.
    """
    def _post(**kwargs):
        api = self.get_api(social_user)
        # api.group.getInfo('uids'='your_group_id', 'fields'='members_count')
        response = api.wall.post(**kwargs)
        return response
    return _post
def _get_rev(self, fpath):
    """Get an SCM revision number. Try git first, then svn."""
    rev = None

    try:
        cmd = ["git", "log", "-n1", "--pretty=format:\"%h\"", fpath]
        rev = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()[0]
    except Exception:
        pass

    if not rev:
        try:
            cmd = ["svn", "info", fpath]
            svninfo = Popen(cmd, stdout=PIPE, stderr=PIPE).stdout.readlines()
            for info in svninfo:
                tokens = info.split(":")
                if tokens[0].strip() == "Last Changed Rev":
                    rev = tokens[1].strip()
        except Exception:
            pass

    return rev
def execute_migrations(self, show_traceback=True):
    """Executes all pending migrations across all capable databases."""
    all_migrations = get_pending_migrations(self.path, self.databases)
    if not len(all_migrations):
        sys.stdout.write("There are no migrations to apply.\n")

    for db, migrations in all_migrations.iteritems():
        connection = connections[db]

        # init connection
        cursor = connection.cursor()
        cursor.close()

        for migration in migrations:
            migration_path = self._get_migration_path(db, migration)

            with Transactional():
                sys.stdout.write(
                    "Executing migration %r on %r...." % (migration, db)
                )
                created_models = self._execute_migration(
                    db,
                    migration_path,
                    show_traceback=show_traceback
                )

                emit_post_sync_signal(
                    created_models=created_models,
                    verbosity=self.verbosity,
                    interactive=self.interactive,
                    db=db,
                )

            if self.load_initial_data:
                sys.stdout.write(
                    "Running loaddata for initial_data fixtures on %r.\n" % db
                )
                call_command(
                    "loaddata",
                    "initial_data",
                    verbosity=self.verbosity,
                    database=db,
                )
def handle(self, *args, **options):
    """Upgrades the database.

    Executes SQL scripts that haven't already been applied to the database.
    """
    self.do_list = options.get("do_list")
    self.do_execute = options.get("do_execute")
    self.do_create = options.get("do_create")
    self.do_create_all = options.get("do_create_all")
    self.do_seed = options.get("do_seed")
    self.load_initial_data = options.get("load_initial_data", True)
    self.args = args

    if options.get("path"):
        self.path = options.get("path")
    else:
        default_path = self._get_default_migration_path()
        self.path = getattr(
            settings, "NASHVEGAS_MIGRATIONS_DIRECTORY", default_path
        )

    self.verbosity = int(options.get("verbosity", 1))
    self.interactive = options.get("interactive")
    self.databases = options.get("databases")

    # We only use the default alias in creation scenarios (upgrades
    # default to all databases)
    if self.do_create and not self.databases:
        self.databases = [DEFAULT_DB_ALIAS]

    if self.do_create and self.do_create_all:
        raise CommandError("You cannot combine --create and --create-all")

    self.init_nashvegas()

    if self.do_create_all:
        self.create_all_migrations()
    elif self.do_create:
        assert len(self.databases) == 1
        self.create_migrations(self.databases[0])

    if self.do_execute:
        self.execute_migrations()

    if self.do_list:
        self.list_migrations()

    if self.do_seed:
        self.seed_migrations()
def is_git_directory(path='.'):
    """Check if the given directory is a git repository.

    :param path: path to check
    :return: True if it's a git repo and False otherwise
    """
    try:
        dulwich.repo.Repo.discover(path)
    except dulwich.errors.NotGitRepository:
        return False
    return True