Column schema:
id: int32 (values 0 to 252k)
repo: string (lengths 7 to 55)
path: string (lengths 4 to 127)
func_name: string (lengths 1 to 88)
original_string: string (lengths 75 to 19.8k)
language: string (1 distinct value)
code: string (lengths 75 to 19.8k)
code_tokens: sequence
docstring: string (lengths 3 to 17.3k)
docstring_tokens: sequence
sha: string (length 40)
url: string (lengths 87 to 242)
5,500
i3visio/osrframework
osrframework/utils/platforms.py
Platform._getAuthenticated
def _getAuthenticated(self, browser, url): """ Getting authenticated. This method may be overwritten. TODO: update to version 2 of the wrappers. Args: ----- browser: The browser in which the user will be authenticated. url: The URL to get authenticated in. Return: ------- True or False. Raises: ------ NoCredentialsException: If no valid credentials have been found. BadImplementationError: If an expected attribute is missing. """ # check if we have creds try: if len(self.creds) > 0: # TODO: in choosing a cred there is an uneeded nesting of arrays c = random.choice(self.creds)[0] # adding the credential browser.setNewPassword(url, c.user, c.password) return True else: raise NoCredentialsException(str(self)) except AttributeError as e: raise BadImplementationError(str(e))
python
def _getAuthenticated(self, browser, url): """ Getting authenticated. This method may be overwritten. TODO: update to version 2 of the wrappers. Args: ----- browser: The browser in which the user will be authenticated. url: The URL to get authenticated in. Return: ------- True or False. Raises: ------ NoCredentialsException: If no valid credentials have been found. BadImplementationError: If an expected attribute is missing. """ # check if we have creds try: if len(self.creds) > 0: # TODO: in choosing a cred there is an uneeded nesting of arrays c = random.choice(self.creds)[0] # adding the credential browser.setNewPassword(url, c.user, c.password) return True else: raise NoCredentialsException(str(self)) except AttributeError as e: raise BadImplementationError(str(e))
[ "def", "_getAuthenticated", "(", "self", ",", "browser", ",", "url", ")", ":", "# check if we have creds", "try", ":", "if", "len", "(", "self", ".", "creds", ")", ">", "0", ":", "# TODO: in choosing a cred there is an uneeded nesting of arrays", "c", "=", "random", ".", "choice", "(", "self", ".", "creds", ")", "[", "0", "]", "# adding the credential", "browser", ".", "setNewPassword", "(", "url", ",", "c", ".", "user", ",", "c", ".", "password", ")", "return", "True", "else", ":", "raise", "NoCredentialsException", "(", "str", "(", "self", ")", ")", "except", "AttributeError", "as", "e", ":", "raise", "BadImplementationError", "(", "str", "(", "e", ")", ")" ]
Getting authenticated. This method may be overwritten. TODO: update to version 2 of the wrappers. Args: ----- browser: The browser in which the user will be authenticated. url: The URL to get authenticated in. Return: ------- True or False. Raises: ------ NoCredentialsException: If no valid credentials have been found. BadImplementationError: If an expected attribute is missing.
[ "Getting", "authenticated", "." ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/platforms.py#L234-L267
5,501
i3visio/osrframework
osrframework/utils/platforms.py
Platform._isValidQuery
def _isValidQuery(self, query, mode="phonefy"): """ Method to verify if a given query is processable by the platform. The system looks for the forbidden characters in self.Forbidden list. Args: ----- query: The query to be launched. mode: To be chosen amongst mailfy, phonefy, usufy, searchfy. Return: ------- True | False """ try: # Suport for version 2 of wrappers validator = self.modes[mode].get("query_validator") if validator: try: compiledRegexp = re.compile( "^{expr}$".format( expr=validator ) ) return compiledRegexp.match(query) except AttributeError as e: return True except AttributeError as e: # Legacy for mantaining old wrappers compiledRegexp = re.compile("^{r}$".format(r=self.validQuery[mode])) return compiledRegexp.match(query)
python
def _isValidQuery(self, query, mode="phonefy"): """ Method to verify if a given query is processable by the platform. The system looks for the forbidden characters in self.Forbidden list. Args: ----- query: The query to be launched. mode: To be chosen amongst mailfy, phonefy, usufy, searchfy. Return: ------- True | False """ try: # Suport for version 2 of wrappers validator = self.modes[mode].get("query_validator") if validator: try: compiledRegexp = re.compile( "^{expr}$".format( expr=validator ) ) return compiledRegexp.match(query) except AttributeError as e: return True except AttributeError as e: # Legacy for mantaining old wrappers compiledRegexp = re.compile("^{r}$".format(r=self.validQuery[mode])) return compiledRegexp.match(query)
[ "def", "_isValidQuery", "(", "self", ",", "query", ",", "mode", "=", "\"phonefy\"", ")", ":", "try", ":", "# Suport for version 2 of wrappers", "validator", "=", "self", ".", "modes", "[", "mode", "]", ".", "get", "(", "\"query_validator\"", ")", "if", "validator", ":", "try", ":", "compiledRegexp", "=", "re", ".", "compile", "(", "\"^{expr}$\"", ".", "format", "(", "expr", "=", "validator", ")", ")", "return", "compiledRegexp", ".", "match", "(", "query", ")", "except", "AttributeError", "as", "e", ":", "return", "True", "except", "AttributeError", "as", "e", ":", "# Legacy for mantaining old wrappers", "compiledRegexp", "=", "re", ".", "compile", "(", "\"^{r}$\"", ".", "format", "(", "r", "=", "self", ".", "validQuery", "[", "mode", "]", ")", ")", "return", "compiledRegexp", ".", "match", "(", "query", ")" ]
Method to verify if a given query is processable by the platform. The system looks for the forbidden characters in self.Forbidden list. Args: ----- query: The query to be launched. mode: To be chosen amongst mailfy, phonefy, usufy, searchfy. Return: ------- True | False
[ "Method", "to", "verify", "if", "a", "given", "query", "is", "processable", "by", "the", "platform", "." ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/platforms.py#L270-L301
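A minimal standalone sketch of the v2 validation path used by _isValidQuery above; the modes dictionary and its regular expressions are illustrative, not taken from a real wrapper:

import re

# Illustrative per-mode validators; real platform wrappers define their own.
modes = {
    "phonefy": {"query_validator": r"\+?[0-9]{6,15}"},
    "usufy": {"query_validator": r"[a-zA-Z0-9_.-]+"},
}

def is_valid_query(query, mode="phonefy"):
    """Return True when the query matches the mode's anchored validator (or none is set)."""
    validator = modes.get(mode, {}).get("query_validator")
    if not validator:
        return True
    return re.match("^{expr}$".format(expr=validator), query) is not None

print(is_valid_query("+34600111222", "phonefy"))  # True
print(is_valid_query("not a phone", "phonefy"))   # False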
5,502
i3visio/osrframework
osrframework/utils/platforms.py
Platform._somethingFound
def _somethingFound(self, data, mode="phonefy"): """ Verifying if something was found. Args: ----- data: Data where the self.notFoundText will be searched. mode: Mode to be executed. Return: ------- True if exists. """ if data: try: for text in self.notFoundText[mode]: if text in data: return False return True except AttributeError as e: # Update to version 2 of the wrappers. verifier = self.modes.get(mode) if verifier: if verifier.get("not_found_text", "") in data: return False else: return True return False
python
def _somethingFound(self, data, mode="phonefy"): """ Verifying if something was found. Args: ----- data: Data where the self.notFoundText will be searched. mode: Mode to be executed. Return: ------- True if exists. """ if data: try: for text in self.notFoundText[mode]: if text in data: return False return True except AttributeError as e: # Update to version 2 of the wrappers. verifier = self.modes.get(mode) if verifier: if verifier.get("not_found_text", "") in data: return False else: return True return False
[ "def", "_somethingFound", "(", "self", ",", "data", ",", "mode", "=", "\"phonefy\"", ")", ":", "if", "data", ":", "try", ":", "for", "text", "in", "self", ".", "notFoundText", "[", "mode", "]", ":", "if", "text", "in", "data", ":", "return", "False", "return", "True", "except", "AttributeError", "as", "e", ":", "# Update to version 2 of the wrappers.", "verifier", "=", "self", ".", "modes", ".", "get", "(", "mode", ")", "if", "verifier", ":", "if", "verifier", ".", "get", "(", "\"not_found_text\"", ",", "\"\"", ")", "in", "data", ":", "return", "False", "else", ":", "return", "True", "return", "False" ]
Verifying if something was found. Args: ----- data: Data where the self.notFoundText will be searched. mode: Mode to be executed. Return: ------- True if exists.
[ "Verifying", "if", "something", "was", "found", "." ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/platforms.py#L304-L331
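The v2 branch of _somethingFound reduces to a substring test against the mode's "not_found_text"; a self-contained sketch with made-up page content:

# Illustrative mode configuration; the marker string is invented.
modes = {"phonefy": {"not_found_text": "No results found"}}

def something_found(data, mode="phonefy"):
    """Return True when the fetched page does not contain the mode's not-found marker."""
    if not data:
        return False
    verifier = modes.get(mode)
    if not verifier:
        return False
    return verifier.get("not_found_text", "") not in data

print(something_found("<html>Profile: John Doe</html>"))  # True
print(something_found("<html>No results found</html>"))   # False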
5,503
i3visio/osrframework
osrframework/utils/platforms.py
Platform.do_phonefy
def do_phonefy(self, query, **kwargs): """ Verifying a phonefy query in this platform. This might be redefined in any class inheriting from Platform. Args: ----- query: The element to be searched. Return: ------- A list of elements to be appended. """ results = [] test = self.check_phonefy(query, kwargs) if test: r = { "type": "i3visio.phone", "value": self.platformName + " - " + query, "attributes": [] } try: aux = { "type": "i3visio.uri", "value": self.createURL(query, mode="phonefy"), "attributes": [] } r["attributes"].append(aux) except: pass aux = { "type": "i3visio.platform", "value": self.platformName, "attributes": [] } r["attributes"].append(aux) # V2 of the wrappers r["attributes"] += self.process_phonefy(test) results.append(r) return results
python
def do_phonefy(self, query, **kwargs): """ Verifying a phonefy query in this platform. This might be redefined in any class inheriting from Platform. Args: ----- query: The element to be searched. Return: ------- A list of elements to be appended. """ results = [] test = self.check_phonefy(query, kwargs) if test: r = { "type": "i3visio.phone", "value": self.platformName + " - " + query, "attributes": [] } try: aux = { "type": "i3visio.uri", "value": self.createURL(query, mode="phonefy"), "attributes": [] } r["attributes"].append(aux) except: pass aux = { "type": "i3visio.platform", "value": self.platformName, "attributes": [] } r["attributes"].append(aux) # V2 of the wrappers r["attributes"] += self.process_phonefy(test) results.append(r) return results
[ "def", "do_phonefy", "(", "self", ",", "query", ",", "*", "*", "kwargs", ")", ":", "results", "=", "[", "]", "test", "=", "self", ".", "check_phonefy", "(", "query", ",", "kwargs", ")", "if", "test", ":", "r", "=", "{", "\"type\"", ":", "\"i3visio.phone\"", ",", "\"value\"", ":", "self", ".", "platformName", "+", "\" - \"", "+", "query", ",", "\"attributes\"", ":", "[", "]", "}", "try", ":", "aux", "=", "{", "\"type\"", ":", "\"i3visio.uri\"", ",", "\"value\"", ":", "self", ".", "createURL", "(", "query", ",", "mode", "=", "\"phonefy\"", ")", ",", "\"attributes\"", ":", "[", "]", "}", "r", "[", "\"attributes\"", "]", ".", "append", "(", "aux", ")", "except", ":", "pass", "aux", "=", "{", "\"type\"", ":", "\"i3visio.platform\"", ",", "\"value\"", ":", "self", ".", "platformName", ",", "\"attributes\"", ":", "[", "]", "}", "r", "[", "\"attributes\"", "]", ".", "append", "(", "aux", ")", "# V2 of the wrappers", "r", "[", "\"attributes\"", "]", "+=", "self", ".", "process_phonefy", "(", "test", ")", "results", ".", "append", "(", "r", ")", "return", "results" ]
Verifying a phonefy query in this platform. This might be redefined in any class inheriting from Platform. Args: ----- query: The element to be searched. Return: ------- A list of elements to be appended.
[ "Verifying", "a", "phonefy", "query", "in", "this", "platform", "." ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/platforms.py#L541-L587
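For reference, a phonefy hit produced by the loop above has this nested entity shape; every value below is a placeholder rather than real platform output:

result = {
    "type": "i3visio.phone",
    "value": "ExamplePlatform - 600111222",
    "attributes": [
        {"type": "i3visio.uri", "value": "https://example.com/phone/600111222", "attributes": []},
        {"type": "i3visio.platform", "value": "ExamplePlatform", "attributes": []},
        # ...plus whatever process_phonefy() extracts from the fetched page
    ],
}
print(result["value"])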
5,504
i3visio/osrframework
osrframework/utils/platforms.py
Platform.process_usufy
def process_usufy(self, data): """ Method to process and extract the entities of a usufy Args: ----- data: The information from which the info will be extracted. Return: ------- A list of the entities found. """ mode = "usufy" info = [] try: # v2 verifier = self.modes.get(mode, {}).get("extra_fields", {}) for field in verifier.keys(): regexp = verifier[field] values = re.findall(regexp, data) for val in values: aux = {} aux["type"] = field aux["value"] = val aux["attributes"] = [] if aux not in info: info.append(aux) except AttributeError as e: # Legacy for field in self.fieldsRegExp[mode].keys(): # Recovering the RegularExpression try: # Using the old approach of "Start" + "End" regexp = self.fieldsRegExp[mode][field]["start"]+"([^\)]+)"+self.fieldsRegExp[mode][field]["end"] tmp = re.findall(regexp, data) # Now we are performing an operation just in case the "end" tag is found in the results, which would mean that the tag selected matches something longer in the data. values = [] for t in tmp: if self.fieldsRegExp[mode][field]["end"] in t: values.append(t.split(self.fieldsRegExp[mode][field]["end"])[0]) else: values.append(t) except: # Using the compact approach if start and end tags do not exist. regexp = self.fieldsRegExp[mode][field] values = re.findall(regexp, data) for val in values: aux = {} aux["type"] = field aux["value"] = val aux["attributes"] = [] if aux not in info: info.append(aux) return info
python
def process_usufy(self, data): """ Method to process and extract the entities of a usufy Args: ----- data: The information from which the info will be extracted. Return: ------- A list of the entities found. """ mode = "usufy" info = [] try: # v2 verifier = self.modes.get(mode, {}).get("extra_fields", {}) for field in verifier.keys(): regexp = verifier[field] values = re.findall(regexp, data) for val in values: aux = {} aux["type"] = field aux["value"] = val aux["attributes"] = [] if aux not in info: info.append(aux) except AttributeError as e: # Legacy for field in self.fieldsRegExp[mode].keys(): # Recovering the RegularExpression try: # Using the old approach of "Start" + "End" regexp = self.fieldsRegExp[mode][field]["start"]+"([^\)]+)"+self.fieldsRegExp[mode][field]["end"] tmp = re.findall(regexp, data) # Now we are performing an operation just in case the "end" tag is found in the results, which would mean that the tag selected matches something longer in the data. values = [] for t in tmp: if self.fieldsRegExp[mode][field]["end"] in t: values.append(t.split(self.fieldsRegExp[mode][field]["end"])[0]) else: values.append(t) except: # Using the compact approach if start and end tags do not exist. regexp = self.fieldsRegExp[mode][field] values = re.findall(regexp, data) for val in values: aux = {} aux["type"] = field aux["value"] = val aux["attributes"] = [] if aux not in info: info.append(aux) return info
[ "def", "process_usufy", "(", "self", ",", "data", ")", ":", "mode", "=", "\"usufy\"", "info", "=", "[", "]", "try", ":", "# v2", "verifier", "=", "self", ".", "modes", ".", "get", "(", "mode", ",", "{", "}", ")", ".", "get", "(", "\"extra_fields\"", ",", "{", "}", ")", "for", "field", "in", "verifier", ".", "keys", "(", ")", ":", "regexp", "=", "verifier", "[", "field", "]", "values", "=", "re", ".", "findall", "(", "regexp", ",", "data", ")", "for", "val", "in", "values", ":", "aux", "=", "{", "}", "aux", "[", "\"type\"", "]", "=", "field", "aux", "[", "\"value\"", "]", "=", "val", "aux", "[", "\"attributes\"", "]", "=", "[", "]", "if", "aux", "not", "in", "info", ":", "info", ".", "append", "(", "aux", ")", "except", "AttributeError", "as", "e", ":", "# Legacy", "for", "field", "in", "self", ".", "fieldsRegExp", "[", "mode", "]", ".", "keys", "(", ")", ":", "# Recovering the RegularExpression", "try", ":", "# Using the old approach of \"Start\" + \"End\"", "regexp", "=", "self", ".", "fieldsRegExp", "[", "mode", "]", "[", "field", "]", "[", "\"start\"", "]", "+", "\"([^\\)]+)\"", "+", "self", ".", "fieldsRegExp", "[", "mode", "]", "[", "field", "]", "[", "\"end\"", "]", "tmp", "=", "re", ".", "findall", "(", "regexp", ",", "data", ")", "# Now we are performing an operation just in case the \"end\" tag is found in the results, which would mean that the tag selected matches something longer in the data.", "values", "=", "[", "]", "for", "t", "in", "tmp", ":", "if", "self", ".", "fieldsRegExp", "[", "mode", "]", "[", "field", "]", "[", "\"end\"", "]", "in", "t", ":", "values", ".", "append", "(", "t", ".", "split", "(", "self", ".", "fieldsRegExp", "[", "mode", "]", "[", "field", "]", "[", "\"end\"", "]", ")", "[", "0", "]", ")", "else", ":", "values", ".", "append", "(", "t", ")", "except", ":", "# Using the compact approach if start and end tags do not exist.", "regexp", "=", "self", ".", "fieldsRegExp", "[", "mode", "]", "[", "field", "]", "values", "=", "re", ".", "findall", "(", "regexp", ",", "data", ")", "for", "val", "in", "values", ":", "aux", "=", "{", "}", "aux", "[", "\"type\"", "]", "=", "field", "aux", "[", "\"value\"", "]", "=", "val", "aux", "[", "\"attributes\"", "]", "=", "[", "]", "if", "aux", "not", "in", "info", ":", "info", ".", "append", "(", "aux", ")", "return", "info" ]
Method to process and extract the entities of a usufy Args: ----- data: The information from which the info will be extracted. Return: ------- A list of the entities found.
[ "Method", "to", "process", "and", "extract", "the", "entities", "of", "a", "usufy" ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/platforms.py#L723-L783
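A compact sketch of the v2 "extra_fields" branch above: each field maps to a capturing regular expression and every unique match becomes an entity. Field names, regexes and the HTML snippet are invented for illustration:

import re

extra_fields = {
    "i3visio.fullname": r'<span class="name">([^<]+)</span>',
    "i3visio.location": r'<span class="city">([^<]+)</span>',
}

def extract_entities(data):
    """Collect unique {type, value, attributes} entities found in the page."""
    info = []
    for field, regexp in extra_fields.items():
        for val in re.findall(regexp, data):
            aux = {"type": field, "value": val, "attributes": []}
            if aux not in info:
                info.append(aux)
    return info

html = '<span class="name">John Doe</span><span class="city">Madrid</span>'
print(extract_entities(html))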
5,505
i3visio/osrframework
osrframework/utils/benchmark.py
doBenchmark
def doBenchmark(plats): ''' Perform the benchmark... ''' logger = logging.getLogger("osrframework.utils") # defining the results dict res = {} # args args = [] #for p in plats: # args.append( (str(p),) ) # selecting the number of tries to be performed tries = [1, 4, 8 ,16, 24, 32, 40, 48, 56, 64] #for i in range(1, len(plats)/10): # tries.append(i*10) logger.info("The test is starting recovering webpages by creating the following series of threads: " + str(tries)) for i in tries: print "Testing creating " + str(i) + " simultaneous threads..." # starting t0 = time.clock() pool = Pool(i) # We call the wrapping function with all the args previously generated poolResults = pool.map(multi_run_wrapper, args) t1 = time.clock() # storing the results res[i] = t1 - t0 print str(i) + "\t" + str(res[i]) + "\n" return res
python
def doBenchmark(plats): ''' Perform the benchmark... ''' logger = logging.getLogger("osrframework.utils") # defining the results dict res = {} # args args = [] #for p in plats: # args.append( (str(p),) ) # selecting the number of tries to be performed tries = [1, 4, 8 ,16, 24, 32, 40, 48, 56, 64] #for i in range(1, len(plats)/10): # tries.append(i*10) logger.info("The test is starting recovering webpages by creating the following series of threads: " + str(tries)) for i in tries: print "Testing creating " + str(i) + " simultaneous threads..." # starting t0 = time.clock() pool = Pool(i) # We call the wrapping function with all the args previously generated poolResults = pool.map(multi_run_wrapper, args) t1 = time.clock() # storing the results res[i] = t1 - t0 print str(i) + "\t" + str(res[i]) + "\n" return res
[ "def", "doBenchmark", "(", "plats", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "\"osrframework.utils\"", ")", "# defining the results dict", "res", "=", "{", "}", "# args", "args", "=", "[", "]", "#for p in plats:", "#\targs.append( (str(p),) )", "# selecting the number of tries to be performed", "tries", "=", "[", "1", ",", "4", ",", "8", ",", "16", ",", "24", ",", "32", ",", "40", ",", "48", ",", "56", ",", "64", "]", "#for i in range(1, len(plats)/10):", "#\ttries.append(i*10)", "logger", ".", "info", "(", "\"The test is starting recovering webpages by creating the following series of threads: \"", "+", "str", "(", "tries", ")", ")", "for", "i", "in", "tries", ":", "print", "\"Testing creating \"", "+", "str", "(", "i", ")", "+", "\" simultaneous threads...\"", "# starting ", "t0", "=", "time", ".", "clock", "(", ")", "pool", "=", "Pool", "(", "i", ")", "# We call the wrapping function with all the args previously generated", "poolResults", "=", "pool", ".", "map", "(", "multi_run_wrapper", ",", "args", ")", "t1", "=", "time", ".", "clock", "(", ")", "# storing the results", "res", "[", "i", "]", "=", "t1", "-", "t0", "print", "str", "(", "i", ")", "+", "\"\\t\"", "+", "str", "(", "res", "[", "i", "]", ")", "+", "\"\\n\"", "return", "res" ]
Perform the benchmark...
[ "Perform", "the", "benchmark", "..." ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/benchmark.py#L55-L90
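doBenchmark above is Python 2 code (print statements, time.clock()); a rough Python 3 sketch of the same timing loop, with a placeholder task standing in for the platform wrappers and a shortened list of pool sizes:

import time
from multiprocessing.dummy import Pool  # thread-backed pool, since the benchmark measures threads

def fetch(arg):
    """Placeholder task; the real benchmark would launch a platform wrapper."""
    time.sleep(0.01)
    return arg

def do_benchmark(args, tries=(1, 4, 8, 16)):
    """Time how long a pool of each size needs to map the task over the arguments."""
    res = {}
    for n in tries:
        t0 = time.perf_counter()
        with Pool(n) as pool:
            pool.map(fetch, args)
        res[n] = time.perf_counter() - t0
        print("{}\t{:.4f}".format(n, res[n]))
    return res

do_benchmark(["https://example.com"] * 32)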
5,506
i3visio/osrframework
osrframework/utils/configuration.py
changePermissionsRecursively
def changePermissionsRecursively(path, uid, gid): """ Function to recursively change the user id and group id. It sets 700 permissions. """ os.chown(path, uid, gid) for item in os.listdir(path): itempath = os.path.join(path, item) if os.path.isfile(itempath): # Setting owner try: os.chown(itempath, uid, gid) except Exception as e: # If this crashes it may be because we are running the # application in Windows systems, where os.chown does NOT work. pass # Setting permissions os.chmod(itempath, 600) elif os.path.isdir(itempath): # Setting owner try: os.chown(itempath, uid, gid) except Exception as e: # If this crashes it may be because we are running the # application in Windows systems, where os.chown does NOT work. pass # Setting permissions os.chmod(itempath, 6600) # Recursive function to iterate the files changePermissionsRecursively(itempath, uid, gid)
python
def changePermissionsRecursively(path, uid, gid): """ Function to recursively change the user id and group id. It sets 700 permissions. """ os.chown(path, uid, gid) for item in os.listdir(path): itempath = os.path.join(path, item) if os.path.isfile(itempath): # Setting owner try: os.chown(itempath, uid, gid) except Exception as e: # If this crashes it may be because we are running the # application in Windows systems, where os.chown does NOT work. pass # Setting permissions os.chmod(itempath, 600) elif os.path.isdir(itempath): # Setting owner try: os.chown(itempath, uid, gid) except Exception as e: # If this crashes it may be because we are running the # application in Windows systems, where os.chown does NOT work. pass # Setting permissions os.chmod(itempath, 6600) # Recursive function to iterate the files changePermissionsRecursively(itempath, uid, gid)
[ "def", "changePermissionsRecursively", "(", "path", ",", "uid", ",", "gid", ")", ":", "os", ".", "chown", "(", "path", ",", "uid", ",", "gid", ")", "for", "item", "in", "os", ".", "listdir", "(", "path", ")", ":", "itempath", "=", "os", ".", "path", ".", "join", "(", "path", ",", "item", ")", "if", "os", ".", "path", ".", "isfile", "(", "itempath", ")", ":", "# Setting owner", "try", ":", "os", ".", "chown", "(", "itempath", ",", "uid", ",", "gid", ")", "except", "Exception", "as", "e", ":", "# If this crashes it may be because we are running the", "# application in Windows systems, where os.chown does NOT work.", "pass", "# Setting permissions", "os", ".", "chmod", "(", "itempath", ",", "600", ")", "elif", "os", ".", "path", ".", "isdir", "(", "itempath", ")", ":", "# Setting owner", "try", ":", "os", ".", "chown", "(", "itempath", ",", "uid", ",", "gid", ")", "except", "Exception", "as", "e", ":", "# If this crashes it may be because we are running the", "# application in Windows systems, where os.chown does NOT work.", "pass", "# Setting permissions", "os", ".", "chmod", "(", "itempath", ",", "6600", ")", "# Recursive function to iterate the files", "changePermissionsRecursively", "(", "itempath", ",", "uid", ",", "gid", ")" ]
Function to recursively change the user id and group id. It sets 700 permissions.
[ "Function", "to", "recursively", "change", "the", "user", "id", "and", "group", "id", "." ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/configuration.py#L33-L63
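One detail worth noting: os.chmod() takes the mode as a plain integer, so the decimal literals 600 and 6600 above do not produce the rw------- / rwx------ bits the docstring describes; octal constants are the usual form. A sketch of the same traversal with os.walk() and octal modes (the uid/gid values in the commented call are placeholders):

import os

def chown_chmod_tree(path, uid, gid, file_mode=0o600, dir_mode=0o700):
    """Walk a tree, try to chown every entry and apply octal permission modes."""
    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            itempath = os.path.join(root, name)
            try:
                os.chown(itempath, uid, gid)  # unavailable on Windows
            except (AttributeError, OSError):
                pass
            os.chmod(itempath, dir_mode if os.path.isdir(itempath) else file_mode)

# chown_chmod_tree(os.path.expanduser("~/.config/OSRFramework"), 1000, 1000)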
5,507
i3visio/osrframework
osrframework/utils/configuration.py
getConfigPath
def getConfigPath(configFileName = None): """ Auxiliar function to get the configuration paths depending on the system Args: ----- configFileName: TODO. Returns: -------- A dictionary with the following keys: appPath, appPathDefaults, appPathTransforms, appPathPlugins, appPathPatterns, appPathPatterns. """ paths = {} applicationPath = "./" # Returning the path of the configuration folder if sys.platform == 'win32': applicationPath = os.path.expanduser(os.path.join('~\\', 'OSRFramework')) else: applicationPath = os.path.expanduser(os.path.join('~/', '.config', 'OSRFramework')) # Defining additional folders paths = { "appPath": applicationPath, "appPathData": os.path.join(applicationPath, "data"), "appPathDefaults": os.path.join(applicationPath, "default"), "appPathPlugins": os.path.join(applicationPath, "plugins"), "appPathWrappers": os.path.join(applicationPath, "plugins", "wrappers"), "appPathPatterns": os.path.join(applicationPath, "plugins", "patterns"), } # Creating them if they don't exist for path in paths.keys(): if not os.path.exists(paths[path]): os.makedirs(paths[path]) return paths
python
def getConfigPath(configFileName = None): """ Auxiliar function to get the configuration paths depending on the system Args: ----- configFileName: TODO. Returns: -------- A dictionary with the following keys: appPath, appPathDefaults, appPathTransforms, appPathPlugins, appPathPatterns, appPathPatterns. """ paths = {} applicationPath = "./" # Returning the path of the configuration folder if sys.platform == 'win32': applicationPath = os.path.expanduser(os.path.join('~\\', 'OSRFramework')) else: applicationPath = os.path.expanduser(os.path.join('~/', '.config', 'OSRFramework')) # Defining additional folders paths = { "appPath": applicationPath, "appPathData": os.path.join(applicationPath, "data"), "appPathDefaults": os.path.join(applicationPath, "default"), "appPathPlugins": os.path.join(applicationPath, "plugins"), "appPathWrappers": os.path.join(applicationPath, "plugins", "wrappers"), "appPathPatterns": os.path.join(applicationPath, "plugins", "patterns"), } # Creating them if they don't exist for path in paths.keys(): if not os.path.exists(paths[path]): os.makedirs(paths[path]) return paths
[ "def", "getConfigPath", "(", "configFileName", "=", "None", ")", ":", "paths", "=", "{", "}", "applicationPath", "=", "\"./\"", "# Returning the path of the configuration folder", "if", "sys", ".", "platform", "==", "'win32'", ":", "applicationPath", "=", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "join", "(", "'~\\\\'", ",", "'OSRFramework'", ")", ")", "else", ":", "applicationPath", "=", "os", ".", "path", ".", "expanduser", "(", "os", ".", "path", ".", "join", "(", "'~/'", ",", "'.config'", ",", "'OSRFramework'", ")", ")", "# Defining additional folders", "paths", "=", "{", "\"appPath\"", ":", "applicationPath", ",", "\"appPathData\"", ":", "os", ".", "path", ".", "join", "(", "applicationPath", ",", "\"data\"", ")", ",", "\"appPathDefaults\"", ":", "os", ".", "path", ".", "join", "(", "applicationPath", ",", "\"default\"", ")", ",", "\"appPathPlugins\"", ":", "os", ".", "path", ".", "join", "(", "applicationPath", ",", "\"plugins\"", ")", ",", "\"appPathWrappers\"", ":", "os", ".", "path", ".", "join", "(", "applicationPath", ",", "\"plugins\"", ",", "\"wrappers\"", ")", ",", "\"appPathPatterns\"", ":", "os", ".", "path", ".", "join", "(", "applicationPath", ",", "\"plugins\"", ",", "\"patterns\"", ")", ",", "}", "# Creating them if they don't exist", "for", "path", "in", "paths", ".", "keys", "(", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "paths", "[", "path", "]", ")", ":", "os", ".", "makedirs", "(", "paths", "[", "path", "]", ")", "return", "paths" ]
Auxiliar function to get the configuration paths depending on the system Args: ----- configFileName: TODO. Returns: -------- A dictionary with the following keys: appPath, appPathDefaults, appPathTransforms, appPathPlugins, appPathPatterns, appPathPatterns.
[ "Auxiliar", "function", "to", "get", "the", "configuration", "paths", "depending", "on", "the", "system" ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/configuration.py#L66-L103
5,508
i3visio/osrframework
osrframework/utils/configuration.py
returnListOfConfigurationValues
def returnListOfConfigurationValues(util): """ Method that recovers the configuration information about each program TODO: Grab the default file from the package data instead of storing it in the main folder. Args: ----- util: Any of the utils that are contained in the framework: domainfy, entify, mailfy, phonefy, searchfy, usufy. Returns: -------- A dictionary containing the default configuration. """ VALUES = {} # If a api_keys.cfg has not been found, creating it by copying from default configPath = os.path.join(getConfigPath()["appPath"], "general.cfg") # Checking if the configuration file exists if not os.path.exists(configPath): # Copy the data from the default folder defaultConfigPath = os.path.join(getConfigPath()["appPathDefaults"], "general.cfg") try: # Recovering default file with open(defaultConfigPath) as iF: cont = iF.read() # Moving its contents as the default values with open(configPath, "w") as oF: oF.write(cont) except Exception as e: raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath); # Reading the configuration file config = ConfigParser.ConfigParser() config.read(configPath) LISTS = ["tlds", "domains", "platforms", "extension", "exclude_platforms", "exclude_domains"] # Iterating through all the sections, which contain the platforms for section in config.sections(): incomplete = False if section.lower() == util.lower(): # Iterating through parameters for (param, value) in config.items(section): if value == '': # Manually setting an empty value if param in LISTS: value = [] else: value = "" # Splitting the parameters to create the arrays when needed elif param in LISTS: value = value.split(' ') # Converting threads to int elif param == "threads": try: value = int(value) except Exception as err: raise errors.ConfigurationParameterNotValidError(configPath, section, param, value) elif param == "debug": try: if int(value) == 0: value = False else: value = True except Exception as err: print("Something happened when processing this debug option. Resetting to default.") # Copy the data from the default folder defaultConfigPath = os.path.join(getConfigPath()["appPathDefaults"], "general.cfg") try: # Recovering default file with open(defaultConfigPath) as iF: cont = iF.read() # Moving its contents as the default values with open(configPath, "w") as oF: oF.write(cont) except Exception as e: raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath); #raise errors.ConfigurationParameterNotValidError(configPath, section, param, value) VALUES[param] = value break return VALUES
python
def returnListOfConfigurationValues(util): """ Method that recovers the configuration information about each program TODO: Grab the default file from the package data instead of storing it in the main folder. Args: ----- util: Any of the utils that are contained in the framework: domainfy, entify, mailfy, phonefy, searchfy, usufy. Returns: -------- A dictionary containing the default configuration. """ VALUES = {} # If a api_keys.cfg has not been found, creating it by copying from default configPath = os.path.join(getConfigPath()["appPath"], "general.cfg") # Checking if the configuration file exists if not os.path.exists(configPath): # Copy the data from the default folder defaultConfigPath = os.path.join(getConfigPath()["appPathDefaults"], "general.cfg") try: # Recovering default file with open(defaultConfigPath) as iF: cont = iF.read() # Moving its contents as the default values with open(configPath, "w") as oF: oF.write(cont) except Exception as e: raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath); # Reading the configuration file config = ConfigParser.ConfigParser() config.read(configPath) LISTS = ["tlds", "domains", "platforms", "extension", "exclude_platforms", "exclude_domains"] # Iterating through all the sections, which contain the platforms for section in config.sections(): incomplete = False if section.lower() == util.lower(): # Iterating through parameters for (param, value) in config.items(section): if value == '': # Manually setting an empty value if param in LISTS: value = [] else: value = "" # Splitting the parameters to create the arrays when needed elif param in LISTS: value = value.split(' ') # Converting threads to int elif param == "threads": try: value = int(value) except Exception as err: raise errors.ConfigurationParameterNotValidError(configPath, section, param, value) elif param == "debug": try: if int(value) == 0: value = False else: value = True except Exception as err: print("Something happened when processing this debug option. Resetting to default.") # Copy the data from the default folder defaultConfigPath = os.path.join(getConfigPath()["appPathDefaults"], "general.cfg") try: # Recovering default file with open(defaultConfigPath) as iF: cont = iF.read() # Moving its contents as the default values with open(configPath, "w") as oF: oF.write(cont) except Exception as e: raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath); #raise errors.ConfigurationParameterNotValidError(configPath, section, param, value) VALUES[param] = value break return VALUES
[ "def", "returnListOfConfigurationValues", "(", "util", ")", ":", "VALUES", "=", "{", "}", "# If a api_keys.cfg has not been found, creating it by copying from default", "configPath", "=", "os", ".", "path", ".", "join", "(", "getConfigPath", "(", ")", "[", "\"appPath\"", "]", ",", "\"general.cfg\"", ")", "# Checking if the configuration file exists", "if", "not", "os", ".", "path", ".", "exists", "(", "configPath", ")", ":", "# Copy the data from the default folder", "defaultConfigPath", "=", "os", ".", "path", ".", "join", "(", "getConfigPath", "(", ")", "[", "\"appPathDefaults\"", "]", ",", "\"general.cfg\"", ")", "try", ":", "# Recovering default file", "with", "open", "(", "defaultConfigPath", ")", "as", "iF", ":", "cont", "=", "iF", ".", "read", "(", ")", "# Moving its contents as the default values", "with", "open", "(", "configPath", ",", "\"w\"", ")", "as", "oF", ":", "oF", ".", "write", "(", "cont", ")", "except", "Exception", "as", "e", ":", "raise", "errors", ".", "DefaultConfigurationFileNotFoundError", "(", "configPath", ",", "defaultConfigPath", ")", "# Reading the configuration file", "config", "=", "ConfigParser", ".", "ConfigParser", "(", ")", "config", ".", "read", "(", "configPath", ")", "LISTS", "=", "[", "\"tlds\"", ",", "\"domains\"", ",", "\"platforms\"", ",", "\"extension\"", ",", "\"exclude_platforms\"", ",", "\"exclude_domains\"", "]", "# Iterating through all the sections, which contain the platforms", "for", "section", "in", "config", ".", "sections", "(", ")", ":", "incomplete", "=", "False", "if", "section", ".", "lower", "(", ")", "==", "util", ".", "lower", "(", ")", ":", "# Iterating through parameters", "for", "(", "param", ",", "value", ")", "in", "config", ".", "items", "(", "section", ")", ":", "if", "value", "==", "''", ":", "# Manually setting an empty value", "if", "param", "in", "LISTS", ":", "value", "=", "[", "]", "else", ":", "value", "=", "\"\"", "# Splitting the parameters to create the arrays when needed", "elif", "param", "in", "LISTS", ":", "value", "=", "value", ".", "split", "(", "' '", ")", "# Converting threads to int", "elif", "param", "==", "\"threads\"", ":", "try", ":", "value", "=", "int", "(", "value", ")", "except", "Exception", "as", "err", ":", "raise", "errors", ".", "ConfigurationParameterNotValidError", "(", "configPath", ",", "section", ",", "param", ",", "value", ")", "elif", "param", "==", "\"debug\"", ":", "try", ":", "if", "int", "(", "value", ")", "==", "0", ":", "value", "=", "False", "else", ":", "value", "=", "True", "except", "Exception", "as", "err", ":", "print", "(", "\"Something happened when processing this debug option. Resetting to default.\"", ")", "# Copy the data from the default folder", "defaultConfigPath", "=", "os", ".", "path", ".", "join", "(", "getConfigPath", "(", ")", "[", "\"appPathDefaults\"", "]", ",", "\"general.cfg\"", ")", "try", ":", "# Recovering default file", "with", "open", "(", "defaultConfigPath", ")", "as", "iF", ":", "cont", "=", "iF", ".", "read", "(", ")", "# Moving its contents as the default values", "with", "open", "(", "configPath", ",", "\"w\"", ")", "as", "oF", ":", "oF", ".", "write", "(", "cont", ")", "except", "Exception", "as", "e", ":", "raise", "errors", ".", "DefaultConfigurationFileNotFoundError", "(", "configPath", ",", "defaultConfigPath", ")", "#raise errors.ConfigurationParameterNotValidError(configPath, section, param, value)", "VALUES", "[", "param", "]", "=", "value", "break", "return", "VALUES" ]
Method that recovers the configuration information about each program TODO: Grab the default file from the package data instead of storing it in the main folder. Args: ----- util: Any of the utils that are contained in the framework: domainfy, entify, mailfy, phonefy, searchfy, usufy. Returns: -------- A dictionary containing the default configuration.
[ "Method", "that", "recovers", "the", "configuration", "information", "about", "each", "program" ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/configuration.py#L106-L195
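The per-parameter coercion applied above (space-separated lists, threads cast to int, debug mapped to a boolean) can be reproduced in isolation with Python 3's configparser; the sample section below is invented:

import configparser

sample = """
[mailfy]
threads = 16
debug = 0
domains = gmail.com outlook.com
"""

LISTS = ["tlds", "domains", "platforms", "extension", "exclude_platforms", "exclude_domains"]

config = configparser.ConfigParser()
config.read_string(sample)

values = {}
for param, value in config.items("mailfy"):
    if value == '':
        values[param] = [] if param in LISTS else ""
    elif param in LISTS:
        values[param] = value.split(' ')
    elif param == "threads":
        values[param] = int(value)
    elif param == "debug":
        values[param] = int(value) != 0
    else:
        values[param] = value

print(values)  # {'threads': 16, 'debug': False, 'domains': ['gmail.com', 'outlook.com']}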
5,509
i3visio/osrframework
osrframework/searchfy.py
performSearch
def performSearch(platformNames=[], queries=[], process=False, excludePlatformNames=[]): """ Method to perform the search itself on the different platforms. Args: ----- platforms: List of <Platform> objects. queries: List of queries to be performed. process: Whether to process all the profiles... SLOW! Returns: -------- A list with the entities collected. """ # Grabbing the <Platform> objects platforms = platform_selection.getPlatformsByName(platformNames, mode="searchfy", excludePlatformNames=excludePlatformNames) results = [] for q in queries: for pla in platforms: # This returns a json.txt! entities = pla.getInfo(query=q, process = process, mode="searchfy") if entities != "[]": results += json.loads(entities) return results
python
def performSearch(platformNames=[], queries=[], process=False, excludePlatformNames=[]): """ Method to perform the search itself on the different platforms. Args: ----- platforms: List of <Platform> objects. queries: List of queries to be performed. process: Whether to process all the profiles... SLOW! Returns: -------- A list with the entities collected. """ # Grabbing the <Platform> objects platforms = platform_selection.getPlatformsByName(platformNames, mode="searchfy", excludePlatformNames=excludePlatformNames) results = [] for q in queries: for pla in platforms: # This returns a json.txt! entities = pla.getInfo(query=q, process = process, mode="searchfy") if entities != "[]": results += json.loads(entities) return results
[ "def", "performSearch", "(", "platformNames", "=", "[", "]", ",", "queries", "=", "[", "]", ",", "process", "=", "False", ",", "excludePlatformNames", "=", "[", "]", ")", ":", "# Grabbing the <Platform> objects", "platforms", "=", "platform_selection", ".", "getPlatformsByName", "(", "platformNames", ",", "mode", "=", "\"searchfy\"", ",", "excludePlatformNames", "=", "excludePlatformNames", ")", "results", "=", "[", "]", "for", "q", "in", "queries", ":", "for", "pla", "in", "platforms", ":", "# This returns a json.txt!", "entities", "=", "pla", ".", "getInfo", "(", "query", "=", "q", ",", "process", "=", "process", ",", "mode", "=", "\"searchfy\"", ")", "if", "entities", "!=", "\"[]\"", ":", "results", "+=", "json", ".", "loads", "(", "entities", ")", "return", "results" ]
Method to perform the search itself on the different platforms. Args: ----- platforms: List of <Platform> objects. queries: List of queries to be performed. process: Whether to process all the profiles... SLOW! Returns: -------- A list with the entities collected.
[ "Method", "to", "perform", "the", "search", "itself", "on", "the", "different", "platforms", "." ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/searchfy.py#L37-L60
5,510
i3visio/osrframework
osrframework/utils/platform_selection.py
getAllPlatformNames
def getAllPlatformNames(mode): """Method that defines the whole list of available parameters. :param mode: The mode of the search. The following can be chosen: ["phonefy", "usufy", "searchfy"]. Return values: Returns a list [] of strings for the platform objects. """ # Recovering all the possible platforms installed platOptions = [] if mode in ["phonefy", "usufy", "searchfy", "mailfy"]: allPlatforms = getAllPlatformObjects(mode=mode) # Defining the platOptions for p in allPlatforms: try: # E. g.: to use wikipedia instead of wikipedia_ca and so on parameter = p.parameterName except: parameter = p.platformName.lower() if parameter not in platOptions: platOptions.append(parameter) elif mode == "domainfy": platOptions = osrframework.domainfy.TLD.keys() platOptions = sorted(set(platOptions)) platOptions.insert(0, 'all') return platOptions
python
def getAllPlatformNames(mode): """Method that defines the whole list of available parameters. :param mode: The mode of the search. The following can be chosen: ["phonefy", "usufy", "searchfy"]. Return values: Returns a list [] of strings for the platform objects. """ # Recovering all the possible platforms installed platOptions = [] if mode in ["phonefy", "usufy", "searchfy", "mailfy"]: allPlatforms = getAllPlatformObjects(mode=mode) # Defining the platOptions for p in allPlatforms: try: # E. g.: to use wikipedia instead of wikipedia_ca and so on parameter = p.parameterName except: parameter = p.platformName.lower() if parameter not in platOptions: platOptions.append(parameter) elif mode == "domainfy": platOptions = osrframework.domainfy.TLD.keys() platOptions = sorted(set(platOptions)) platOptions.insert(0, 'all') return platOptions
[ "def", "getAllPlatformNames", "(", "mode", ")", ":", "# Recovering all the possible platforms installed", "platOptions", "=", "[", "]", "if", "mode", "in", "[", "\"phonefy\"", ",", "\"usufy\"", ",", "\"searchfy\"", ",", "\"mailfy\"", "]", ":", "allPlatforms", "=", "getAllPlatformObjects", "(", "mode", "=", "mode", ")", "# Defining the platOptions", "for", "p", "in", "allPlatforms", ":", "try", ":", "# E. g.: to use wikipedia instead of wikipedia_ca and so on", "parameter", "=", "p", ".", "parameterName", "except", ":", "parameter", "=", "p", ".", "platformName", ".", "lower", "(", ")", "if", "parameter", "not", "in", "platOptions", ":", "platOptions", ".", "append", "(", "parameter", ")", "elif", "mode", "==", "\"domainfy\"", ":", "platOptions", "=", "osrframework", ".", "domainfy", ".", "TLD", ".", "keys", "(", ")", "platOptions", "=", "sorted", "(", "set", "(", "platOptions", ")", ")", "platOptions", ".", "insert", "(", "0", ",", "'all'", ")", "return", "platOptions" ]
Method that defines the whole list of available parameters. :param mode: The mode of the search. The following can be chosen: ["phonefy", "usufy", "searchfy"]. Return values: Returns a list [] of strings for the platform objects.
[ "Method", "that", "defines", "the", "whole", "list", "of", "available", "parameters", "." ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/utils/platform_selection.py#L35-L62
5,511
i3visio/osrframework
osrframework/thirdparties/pipl_com/lib/utils.py
Serializable.from_json
def from_json(cls, json_str): """Deserialize the object from a JSON string.""" d = json.loads(json_str) return cls.from_dict(d)
python
def from_json(cls, json_str): """Deserialize the object from a JSON string.""" d = json.loads(json_str) return cls.from_dict(d)
[ "def", "from_json", "(", "cls", ",", "json_str", ")", ":", "d", "=", "json", ".", "loads", "(", "json_str", ")", "return", "cls", ".", "from_dict", "(", "d", ")" ]
Deserialize the object from a JSON string.
[ "Deserialize", "the", "object", "from", "a", "JSON", "string", "." ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/utils.py#L33-L36
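A minimal sketch of the pattern behind from_json(): the classmethod parses the string and hands the resulting dict to a from_dict() hook supplied by the concrete class. The Person class below is invented for illustration:

import json

class Serializable(object):
    @classmethod
    def from_json(cls, json_str):
        """Deserialize the object from a JSON string via the subclass's from_dict()."""
        return cls.from_dict(json.loads(json_str))

class Person(Serializable):
    def __init__(self, name):
        self.name = name

    @classmethod
    def from_dict(cls, d):
        return cls(d["name"])

print(Person.from_json('{"name": "Ada"}').name)  # Ada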
5,512
i3visio/osrframework
osrframework/checkfy.py
createEmails
def createEmails(nicks=None, nicksFile=None): """ Method that globally permits to generate the emails to be checked. Args: ----- nicks: List of aliases. nicksFile: The filepath to the aliases file. Returns: -------- list: list of emails to be checked. """ candidate_emails = set() if nicks != None: for n in nicks: for e in email_providers.domains: candidate_emails.add("{}@{}".format(n, e)) elif nicksFile != None: with open(nicksFile, "r") as iF: nicks = iF.read().splitlines() for n in nicks: for e in email_providers.domains: candidate_emails.add("{}@{}".format(n, e)) return candidate_emails
python
def createEmails(nicks=None, nicksFile=None): """ Method that globally permits to generate the emails to be checked. Args: ----- nicks: List of aliases. nicksFile: The filepath to the aliases file. Returns: -------- list: list of emails to be checked. """ candidate_emails = set() if nicks != None: for n in nicks: for e in email_providers.domains: candidate_emails.add("{}@{}".format(n, e)) elif nicksFile != None: with open(nicksFile, "r") as iF: nicks = iF.read().splitlines() for n in nicks: for e in email_providers.domains: candidate_emails.add("{}@{}".format(n, e)) return candidate_emails
[ "def", "createEmails", "(", "nicks", "=", "None", ",", "nicksFile", "=", "None", ")", ":", "candidate_emails", "=", "set", "(", ")", "if", "nicks", "!=", "None", ":", "for", "n", "in", "nicks", ":", "for", "e", "in", "email_providers", ".", "domains", ":", "candidate_emails", ".", "add", "(", "\"{}@{}\"", ".", "format", "(", "n", ",", "e", ")", ")", "elif", "nicksFile", "!=", "None", ":", "with", "open", "(", "nicksFile", ",", "\"r\"", ")", "as", "iF", ":", "nicks", "=", "iF", ".", "read", "(", ")", ".", "splitlines", "(", ")", "for", "n", "in", "nicks", ":", "for", "e", "in", "email_providers", ".", "domains", ":", "candidate_emails", ".", "add", "(", "\"{}@{}\"", ".", "format", "(", "n", ",", "e", ")", ")", "return", "candidate_emails" ]
Method that globally permits to generate the emails to be checked. Args: ----- nicks: List of aliases. nicksFile: The filepath to the aliases file. Returns: -------- list: list of emails to be checked.
[ "Method", "that", "globally", "permits", "to", "generate", "the", "emails", "to", "be", "checked", "." ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/checkfy.py#L39-L63
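A standalone sketch of the alias-to-address expansion in createEmails, with a short provider list standing in for email_providers.domains:

domains = ["gmail.com", "outlook.com", "yahoo.com"]  # illustrative providers

def create_emails(nicks):
    """Build the set of candidate addresses for every alias/provider pair."""
    return {"{}@{}".format(n, d) for n in nicks for d in domains}

print(sorted(create_emails(["i3visio", "osrframework"])))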
5,513
i3visio/osrframework
osrframework/thirdparties/resolvethem_com/processing.py
checkIPFromAlias
def checkIPFromAlias(alias=None): ''' Method that checks if the given alias is currently connected to Skype and returns its IP address. :param alias: Alias to be searched. :return: Python structure for the Json received. It has the following structure: { "type": "i3visio.ip", "value": "1.1.1.1", "attributes" : [] } ''' headers = { "Content-type": "text/html", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Encoding": " gzip, deflate", "Accept-Language": " es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3", "Connection": "keep-alive", "DNT": "1", "Host": "www.resolvethem.com", "Referer": "http://www.resolvethem.com/index.php", "User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0", "Content-Length": "26", "Content-Type": "application/x-www-form-urlencoded", } req = requests.post("http://www.resolvethem.com/index.php",headers=headers,data={'skypeUsername': alias,'submit':''}) # Data returned data = req.content # Compilation of the regular expression p = re.compile("class='alert alert-success'>([0-9\.]*)<") allMatches = p.findall(data) if len(allMatches)> 0: jsonData = {} jsonData["type"]="i3visio.ip" jsonData["value"]=allMatches[0] jsonData["attributes"]=[] return jsonData return {}
python
def checkIPFromAlias(alias=None): ''' Method that checks if the given alias is currently connected to Skype and returns its IP address. :param alias: Alias to be searched. :return: Python structure for the Json received. It has the following structure: { "type": "i3visio.ip", "value": "1.1.1.1", "attributes" : [] } ''' headers = { "Content-type": "text/html", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "Accept-Encoding": " gzip, deflate", "Accept-Language": " es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3", "Connection": "keep-alive", "DNT": "1", "Host": "www.resolvethem.com", "Referer": "http://www.resolvethem.com/index.php", "User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0", "Content-Length": "26", "Content-Type": "application/x-www-form-urlencoded", } req = requests.post("http://www.resolvethem.com/index.php",headers=headers,data={'skypeUsername': alias,'submit':''}) # Data returned data = req.content # Compilation of the regular expression p = re.compile("class='alert alert-success'>([0-9\.]*)<") allMatches = p.findall(data) if len(allMatches)> 0: jsonData = {} jsonData["type"]="i3visio.ip" jsonData["value"]=allMatches[0] jsonData["attributes"]=[] return jsonData return {}
[ "def", "checkIPFromAlias", "(", "alias", "=", "None", ")", ":", "headers", "=", "{", "\"Content-type\"", ":", "\"text/html\"", ",", "\"Accept\"", ":", "\"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\"", ",", "\"Accept-Encoding\"", ":", "\" gzip, deflate\"", ",", "\"Accept-Language\"", ":", "\" es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3\"", ",", "\"Connection\"", ":", "\"keep-alive\"", ",", "\"DNT\"", ":", "\"1\"", ",", "\"Host\"", ":", "\"www.resolvethem.com\"", ",", "\"Referer\"", ":", "\"http://www.resolvethem.com/index.php\"", ",", "\"User-Agent\"", ":", "\"Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0\"", ",", "\"Content-Length\"", ":", "\"26\"", ",", "\"Content-Type\"", ":", "\"application/x-www-form-urlencoded\"", ",", "}", "req", "=", "requests", ".", "post", "(", "\"http://www.resolvethem.com/index.php\"", ",", "headers", "=", "headers", ",", "data", "=", "{", "'skypeUsername'", ":", "alias", ",", "'submit'", ":", "''", "}", ")", "# Data returned", "data", "=", "req", ".", "content", "# Compilation of the regular expression", "p", "=", "re", ".", "compile", "(", "\"class='alert alert-success'>([0-9\\.]*)<\"", ")", "allMatches", "=", "p", ".", "findall", "(", "data", ")", "if", "len", "(", "allMatches", ")", ">", "0", ":", "jsonData", "=", "{", "}", "jsonData", "[", "\"type\"", "]", "=", "\"i3visio.ip\"", "jsonData", "[", "\"value\"", "]", "=", "allMatches", "[", "0", "]", "jsonData", "[", "\"attributes\"", "]", "=", "[", "]", "return", "jsonData", "return", "{", "}" ]
Method that checks if the given alias is currently connected to Skype and returns its IP address. :param alias: Alias to be searched. :return: Python structure for the Json received. It has the following structure: { "type": "i3visio.ip", "value": "1.1.1.1", "attributes" : [] }
[ "Method", "that", "checks", "if", "the", "given", "alias", "is", "currently", "connected", "to", "Skype", "and", "returns", "its", "IP", "address", "." ]
83437f4c14c9c08cb80a896bd9834c77f6567871
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/resolvethem_com/processing.py#L26-L65
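The scraping step above comes down to one regular expression over the response body; run against a canned snippet (the address is from the documentation range, not real output):

import re

sample = "<div class='alert alert-success'>203.0.113.7</div>"
matches = re.findall(r"class='alert alert-success'>([0-9\.]*)<", sample)
if matches:
    print({"type": "i3visio.ip", "value": matches[0], "attributes": []})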
5,514
jrief/djangocms-cascade
cmsplugin_cascade/sharable/forms.py
SharableGlossaryMixin.get_form
def get_form(self, request, obj=None, **kwargs): """ Extend the form for the given plugin with the form SharableCascadeForm """ Form = type(str('ExtSharableForm'), (SharableCascadeForm, kwargs.pop('form', self.form)), {}) Form.base_fields['shared_glossary'].limit_choices_to = dict(plugin_type=self.__class__.__name__) kwargs.update(form=Form) return super(SharableGlossaryMixin, self).get_form(request, obj, **kwargs)
python
def get_form(self, request, obj=None, **kwargs): """ Extend the form for the given plugin with the form SharableCascadeForm """ Form = type(str('ExtSharableForm'), (SharableCascadeForm, kwargs.pop('form', self.form)), {}) Form.base_fields['shared_glossary'].limit_choices_to = dict(plugin_type=self.__class__.__name__) kwargs.update(form=Form) return super(SharableGlossaryMixin, self).get_form(request, obj, **kwargs)
[ "def", "get_form", "(", "self", ",", "request", ",", "obj", "=", "None", ",", "*", "*", "kwargs", ")", ":", "Form", "=", "type", "(", "str", "(", "'ExtSharableForm'", ")", ",", "(", "SharableCascadeForm", ",", "kwargs", ".", "pop", "(", "'form'", ",", "self", ".", "form", ")", ")", ",", "{", "}", ")", "Form", ".", "base_fields", "[", "'shared_glossary'", "]", ".", "limit_choices_to", "=", "dict", "(", "plugin_type", "=", "self", ".", "__class__", ".", "__name__", ")", "kwargs", ".", "update", "(", "form", "=", "Form", ")", "return", "super", "(", "SharableGlossaryMixin", ",", "self", ")", ".", "get_form", "(", "request", ",", "obj", ",", "*", "*", "kwargs", ")" ]
Extend the form for the given plugin with the form SharableCascadeForm
[ "Extend", "the", "form", "for", "the", "given", "plugin", "with", "the", "form", "SharableCascadeForm" ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/sharable/forms.py#L122-L129
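The key mechanism in get_form() is assembling a new form class at runtime with type(), so SharableCascadeForm is mixed in ahead of the plugin's own form in the MRO; a Django-free sketch with stand-in classes:

class SharableCascadeForm(object):
    shared_glossary = None  # stand-in for the real form field

class PluginForm(object):
    fields = ["title"]

ExtSharableForm = type(str('ExtSharableForm'), (SharableCascadeForm, PluginForm), {})
print([c.__name__ for c in ExtSharableForm.__mro__])  # ExtSharableForm, SharableCascadeForm, PluginForm, object
print(ExtSharableForm.fields)                         # ['title']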
5,515
jrief/djangocms-cascade
cmsplugin_cascade/bootstrap4/grid.py
Bootstrap4Column.get_min_max_bounds
def get_min_max_bounds(self): """ Return a dict of min- and max-values for the given column. This is required to estimate the bounds of images. """ bound = Bound(999999.0, 0.0) for bp in Breakpoint: bound.extend(self.get_bound(bp)) return {'min': bound.min, 'max': bound.max}
python
def get_min_max_bounds(self): """ Return a dict of min- and max-values for the given column. This is required to estimate the bounds of images. """ bound = Bound(999999.0, 0.0) for bp in Breakpoint: bound.extend(self.get_bound(bp)) return {'min': bound.min, 'max': bound.max}
[ "def", "get_min_max_bounds", "(", "self", ")", ":", "bound", "=", "Bound", "(", "999999.0", ",", "0.0", ")", "for", "bp", "in", "Breakpoint", ":", "bound", ".", "extend", "(", "self", ".", "get_bound", "(", "bp", ")", ")", "return", "{", "'min'", ":", "bound", ".", "min", ",", "'max'", ":", "bound", ".", "max", "}" ]
Return a dict of min- and max-values for the given column. This is required to estimate the bounds of images.
[ "Return", "a", "dict", "of", "min", "-", "and", "max", "-", "values", "for", "the", "given", "column", ".", "This", "is", "required", "to", "estimate", "the", "bounds", "of", "images", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/bootstrap4/grid.py#L306-L314
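The loop above starts from an inverted bound (min=999999.0, max=0.0) and widens it across every breakpoint; an illustrative Bound class exposing the same extend()/min/max surface makes the accumulation visible:

class Bound(object):
    """Stand-in for the grid helper: tracks a running minimum and maximum."""
    def __init__(self, min_, max_):
        self.min, self.max = min_, max_

    def extend(self, other):
        self.min = min(self.min, other.min)
        self.max = max(self.max, other.max)

bound = Bound(999999.0, 0.0)
for bp_bound in [Bound(320.0, 540.0), Bound(540.0, 720.0), Bound(720.0, 960.0)]:
    bound.extend(bp_bound)
print({'min': bound.min, 'max': bound.max})  # {'min': 320.0, 'max': 960.0}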
5,516
jrief/djangocms-cascade
cmsplugin_cascade/plugin_base.py
create_proxy_model
def create_proxy_model(name, model_mixins, base_model, attrs=None, module=None): """ Create a Django Proxy Model on the fly, to be used by any Cascade Plugin. """ from django.apps import apps class Meta: proxy = True app_label = 'cmsplugin_cascade' name = str(name + 'Model') try: Model = apps.get_registered_model(Meta.app_label, name) except LookupError: bases = model_mixins + (base_model,) attrs = dict(attrs or {}, Meta=Meta, __module__=module) Model = type(name, bases, attrs) fake_proxy_models[name] = bases return Model
python
def create_proxy_model(name, model_mixins, base_model, attrs=None, module=None): """ Create a Django Proxy Model on the fly, to be used by any Cascade Plugin. """ from django.apps import apps class Meta: proxy = True app_label = 'cmsplugin_cascade' name = str(name + 'Model') try: Model = apps.get_registered_model(Meta.app_label, name) except LookupError: bases = model_mixins + (base_model,) attrs = dict(attrs or {}, Meta=Meta, __module__=module) Model = type(name, bases, attrs) fake_proxy_models[name] = bases return Model
[ "def", "create_proxy_model", "(", "name", ",", "model_mixins", ",", "base_model", ",", "attrs", "=", "None", ",", "module", "=", "None", ")", ":", "from", "django", ".", "apps", "import", "apps", "class", "Meta", ":", "proxy", "=", "True", "app_label", "=", "'cmsplugin_cascade'", "name", "=", "str", "(", "name", "+", "'Model'", ")", "try", ":", "Model", "=", "apps", ".", "get_registered_model", "(", "Meta", ".", "app_label", ",", "name", ")", "except", "LookupError", ":", "bases", "=", "model_mixins", "+", "(", "base_model", ",", ")", "attrs", "=", "dict", "(", "attrs", "or", "{", "}", ",", "Meta", "=", "Meta", ",", "__module__", "=", "module", ")", "Model", "=", "type", "(", "name", ",", "bases", ",", "attrs", ")", "fake_proxy_models", "[", "name", "]", "=", "bases", "return", "Model" ]
Create a Django Proxy Model on the fly, to be used by any Cascade Plugin.
[ "Create", "a", "Django", "Proxy", "Model", "on", "the", "fly", "to", "be", "used", "by", "any", "Cascade", "Plugin", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/plugin_base.py#L35-L53
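The create_proxy_model record above builds Django proxy models at runtime through the three-argument type(name, bases, attrs) call and remembers the bases in fake_proxy_models. The sketch below strips the Django specifics (apps registry, proxy Meta) and keeps only the dynamic class creation, with made-up class names, to show what that type() call does.

# Framework-free sketch of dynamic class creation; names are illustrative only.
class BaseModel:
    def save(self):
        return '{} saved'.format(self.__class__.__name__)

class ExtraMixin:
    extra = True

def create_class(name, model_mixins, base_model, attrs=None):
    bases = model_mixins + (base_model,)
    attrs = dict(attrs or {}, __module__=__name__)
    return type(name, bases, attrs)  # same call as in create_proxy_model()

ProxyLike = create_class('ProxyLike', (ExtraMixin,), BaseModel)
print(ProxyLike().save(), ProxyLike.extra)  # ProxyLike saved True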
5,517
jrief/djangocms-cascade
cmsplugin_cascade/plugin_base.py
CascadePluginBase._get_parent_classes_transparent
def _get_parent_classes_transparent(cls, slot, page, instance=None): """ Return all parent classes including those marked as "transparent". """ parent_classes = super(CascadePluginBase, cls).get_parent_classes(slot, page, instance) if parent_classes is None: if cls.get_require_parent(slot, page) is False: return parent_classes = [] # add all plugins marked as 'transparent', since they all are potential parents parent_classes = set(parent_classes) parent_classes.update(TransparentContainer.get_plugins()) return list(parent_classes)
python
def _get_parent_classes_transparent(cls, slot, page, instance=None): """ Return all parent classes including those marked as "transparent". """ parent_classes = super(CascadePluginBase, cls).get_parent_classes(slot, page, instance) if parent_classes is None: if cls.get_require_parent(slot, page) is False: return parent_classes = [] # add all plugins marked as 'transparent', since they all are potential parents parent_classes = set(parent_classes) parent_classes.update(TransparentContainer.get_plugins()) return list(parent_classes)
[ "def", "_get_parent_classes_transparent", "(", "cls", ",", "slot", ",", "page", ",", "instance", "=", "None", ")", ":", "parent_classes", "=", "super", "(", "CascadePluginBase", ",", "cls", ")", ".", "get_parent_classes", "(", "slot", ",", "page", ",", "instance", ")", "if", "parent_classes", "is", "None", ":", "if", "cls", ".", "get_require_parent", "(", "slot", ",", "page", ")", "is", "False", ":", "return", "parent_classes", "=", "[", "]", "# add all plugins marked as 'transparent', since they all are potential parents", "parent_classes", "=", "set", "(", "parent_classes", ")", "parent_classes", ".", "update", "(", "TransparentContainer", ".", "get_plugins", "(", ")", ")", "return", "list", "(", "parent_classes", ")" ]
Return all parent classes including those marked as "transparent".
[ "Return", "all", "parent", "classes", "including", "those", "marked", "as", "transparent", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/plugin_base.py#L282-L295
5,518
jrief/djangocms-cascade
cmsplugin_cascade/plugin_base.py
CascadePluginBase.extend_children
def extend_children(self, parent, wanted_children, child_class, child_glossary=None): """ Extend the number of children so that the parent object contains wanted children. No child will be removed if wanted_children is smaller than the current number of children. """ from cms.api import add_plugin current_children = parent.get_num_children() for _ in range(current_children, wanted_children): child = add_plugin(parent.placeholder, child_class, parent.language, target=parent) if isinstance(child_glossary, dict): child.glossary.update(child_glossary) child.save()
python
def extend_children(self, parent, wanted_children, child_class, child_glossary=None): """ Extend the number of children so that the parent object contains wanted children. No child will be removed if wanted_children is smaller than the current number of children. """ from cms.api import add_plugin current_children = parent.get_num_children() for _ in range(current_children, wanted_children): child = add_plugin(parent.placeholder, child_class, parent.language, target=parent) if isinstance(child_glossary, dict): child.glossary.update(child_glossary) child.save()
[ "def", "extend_children", "(", "self", ",", "parent", ",", "wanted_children", ",", "child_class", ",", "child_glossary", "=", "None", ")", ":", "from", "cms", ".", "api", "import", "add_plugin", "current_children", "=", "parent", ".", "get_num_children", "(", ")", "for", "_", "in", "range", "(", "current_children", ",", "wanted_children", ")", ":", "child", "=", "add_plugin", "(", "parent", ".", "placeholder", ",", "child_class", ",", "parent", ".", "language", ",", "target", "=", "parent", ")", "if", "isinstance", "(", "child_glossary", ",", "dict", ")", ":", "child", ".", "glossary", ".", "update", "(", "child_glossary", ")", "child", ".", "save", "(", ")" ]
Extend the number of children so that the parent object contains wanted children. No child will be removed if wanted_children is smaller than the current number of children.
[ "Extend", "the", "number", "of", "children", "so", "that", "the", "parent", "object", "contains", "wanted", "children", ".", "No", "child", "will", "be", "removed", "if", "wanted_children", "is", "smaller", "than", "the", "current", "number", "of", "children", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/plugin_base.py#L360-L371
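A detail worth spelling out about extend_children above: it never removes plugins, because range(current_children, wanted_children) is empty whenever the wanted number is less than or equal to the current one. A self-contained illustration with a plain list standing in for the plugin tree:

def extend(children, wanted_children):
    # mirrors the loop in extend_children(); appends only the missing items
    for _ in range(len(children), wanted_children):
        children.append('child-{}'.format(len(children) + 1))
    return children

print(extend(['child-1'], 3))                        # ['child-1', 'child-2', 'child-3']
print(extend(['child-1', 'child-2', 'child-3'], 2))  # unchanged, still three items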
5,519
jrief/djangocms-cascade
cmsplugin_cascade/plugin_base.py
CascadePluginBase.get_parent_instance
def get_parent_instance(self, request=None, obj=None): """ Get the parent model instance corresponding to this plugin. When adding a new plugin, the parent might not be available. Therefore as fallback, pass in the request object. """ try: parent_id = obj.parent_id except AttributeError: try: # TODO: self.parent presumably is not used anymore in CMS-3.4, because it doesn't # make sense anyway, since the plugin instances shall know their parents, not the # plugins. parent_id = self.parent.id except AttributeError: if request: parent_id = request.GET.get('plugin_parent', None) if parent_id is None: from cms.models import CMSPlugin try: parent_id = CMSPlugin.objects.filter(id=request.resolver_match.args[0] ).only("parent_id").order_by('?').first().parent_id except (AttributeError, IndexError): parent_id = None else: parent_id = None for model in CascadeModelBase._get_cascade_elements(): try: return model.objects.get(id=parent_id) except model.DoesNotExist: continue
python
def get_parent_instance(self, request=None, obj=None): """ Get the parent model instance corresponding to this plugin. When adding a new plugin, the parent might not be available. Therefore as fallback, pass in the request object. """ try: parent_id = obj.parent_id except AttributeError: try: # TODO: self.parent presumably is not used anymore in CMS-3.4, because it doesn't # make sense anyway, since the plugin instances shall know their parents, not the # plugins. parent_id = self.parent.id except AttributeError: if request: parent_id = request.GET.get('plugin_parent', None) if parent_id is None: from cms.models import CMSPlugin try: parent_id = CMSPlugin.objects.filter(id=request.resolver_match.args[0] ).only("parent_id").order_by('?').first().parent_id except (AttributeError, IndexError): parent_id = None else: parent_id = None for model in CascadeModelBase._get_cascade_elements(): try: return model.objects.get(id=parent_id) except model.DoesNotExist: continue
[ "def", "get_parent_instance", "(", "self", ",", "request", "=", "None", ",", "obj", "=", "None", ")", ":", "try", ":", "parent_id", "=", "obj", ".", "parent_id", "except", "AttributeError", ":", "try", ":", "# TODO: self.parent presumably is not used anymore in CMS-3.4, because it doesn't", "# make sense anyway, since the plugin instances shall know their parents, not the", "# plugins.", "parent_id", "=", "self", ".", "parent", ".", "id", "except", "AttributeError", ":", "if", "request", ":", "parent_id", "=", "request", ".", "GET", ".", "get", "(", "'plugin_parent'", ",", "None", ")", "if", "parent_id", "is", "None", ":", "from", "cms", ".", "models", "import", "CMSPlugin", "try", ":", "parent_id", "=", "CMSPlugin", ".", "objects", ".", "filter", "(", "id", "=", "request", ".", "resolver_match", ".", "args", "[", "0", "]", ")", ".", "only", "(", "\"parent_id\"", ")", ".", "order_by", "(", "'?'", ")", ".", "first", "(", ")", ".", "parent_id", "except", "(", "AttributeError", ",", "IndexError", ")", ":", "parent_id", "=", "None", "else", ":", "parent_id", "=", "None", "for", "model", "in", "CascadeModelBase", ".", "_get_cascade_elements", "(", ")", ":", "try", ":", "return", "model", ".", "objects", ".", "get", "(", "id", "=", "parent_id", ")", "except", "model", ".", "DoesNotExist", ":", "continue" ]
Get the parent model instance corresponding to this plugin. When adding a new plugin, the parent might not be available. Therefore as fallback, pass in the request object.
[ "Get", "the", "parent", "model", "instance", "corresponding", "to", "this", "plugin", ".", "When", "adding", "a", "new", "plugin", "the", "parent", "might", "not", "be", "available", ".", "Therefore", "as", "fallback", "pass", "in", "the", "request", "object", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/plugin_base.py#L403-L432
5,520
jrief/djangocms-cascade
cmsplugin_cascade/plugin_base.py
CascadePluginBase.in_edit_mode
def in_edit_mode(self, request, placeholder): """ Returns True, if the plugin is in "edit mode". """ toolbar = getattr(request, 'toolbar', None) edit_mode = getattr(toolbar, 'edit_mode', False) and getattr(placeholder, 'is_editable', True) if edit_mode: edit_mode = placeholder.has_change_permission(request.user) return edit_mode
python
def in_edit_mode(self, request, placeholder): """ Returns True, if the plugin is in "edit mode". """ toolbar = getattr(request, 'toolbar', None) edit_mode = getattr(toolbar, 'edit_mode', False) and getattr(placeholder, 'is_editable', True) if edit_mode: edit_mode = placeholder.has_change_permission(request.user) return edit_mode
[ "def", "in_edit_mode", "(", "self", ",", "request", ",", "placeholder", ")", ":", "toolbar", "=", "getattr", "(", "request", ",", "'toolbar'", ",", "None", ")", "edit_mode", "=", "getattr", "(", "toolbar", ",", "'edit_mode'", ",", "False", ")", "and", "getattr", "(", "placeholder", ",", "'is_editable'", ",", "True", ")", "if", "edit_mode", ":", "edit_mode", "=", "placeholder", ".", "has_change_permission", "(", "request", ".", "user", ")", "return", "edit_mode" ]
Returns True, if the plugin is in "edit mode".
[ "Returns", "True", "if", "the", "plugin", "is", "in", "edit", "mode", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/plugin_base.py#L480-L488
5,521
jrief/djangocms-cascade
cmsplugin_cascade/segmentation/cms_plugins.py
SegmentPlugin._get_previous_open_tag
def _get_previous_open_tag(self, obj): """ Return the open tag of the previous sibling """ prev_instance = self.get_previous_instance(obj) if prev_instance and prev_instance.plugin_type == self.__class__.__name__: return prev_instance.glossary.get('open_tag')
python
def _get_previous_open_tag(self, obj): """ Return the open tag of the previous sibling """ prev_instance = self.get_previous_instance(obj) if prev_instance and prev_instance.plugin_type == self.__class__.__name__: return prev_instance.glossary.get('open_tag')
[ "def", "_get_previous_open_tag", "(", "self", ",", "obj", ")", ":", "prev_instance", "=", "self", ".", "get_previous_instance", "(", "obj", ")", "if", "prev_instance", "and", "prev_instance", ".", "plugin_type", "==", "self", ".", "__class__", ".", "__name__", ":", "return", "prev_instance", ".", "glossary", ".", "get", "(", "'open_tag'", ")" ]
Return the open tag of the previous sibling
[ "Return", "the", "open", "tag", "of", "the", "previous", "sibling" ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/segmentation/cms_plugins.py#L170-L176
5,522
jrief/djangocms-cascade
cmsplugin_cascade/generic/mixins.py
SectionForm.check_unique_element_id
def check_unique_element_id(cls, instance, element_id): """ Check for uniqueness of the given element_id for the current page. Return None if instance is not yet associated with a page. """ try: element_ids = instance.placeholder.page.cascadepage.glossary.get('element_ids', {}) except (AttributeError, ObjectDoesNotExist): pass else: if element_id: for key, value in element_ids.items(): if str(key) != str(instance.pk) and element_id == value: msg = _("The element ID '{}' is not unique for this page.") raise ValidationError(msg.format(element_id))
python
def check_unique_element_id(cls, instance, element_id): """ Check for uniqueness of the given element_id for the current page. Return None if instance is not yet associated with a page. """ try: element_ids = instance.placeholder.page.cascadepage.glossary.get('element_ids', {}) except (AttributeError, ObjectDoesNotExist): pass else: if element_id: for key, value in element_ids.items(): if str(key) != str(instance.pk) and element_id == value: msg = _("The element ID '{}' is not unique for this page.") raise ValidationError(msg.format(element_id))
[ "def", "check_unique_element_id", "(", "cls", ",", "instance", ",", "element_id", ")", ":", "try", ":", "element_ids", "=", "instance", ".", "placeholder", ".", "page", ".", "cascadepage", ".", "glossary", ".", "get", "(", "'element_ids'", ",", "{", "}", ")", "except", "(", "AttributeError", ",", "ObjectDoesNotExist", ")", ":", "pass", "else", ":", "if", "element_id", ":", "for", "key", ",", "value", "in", "element_ids", ".", "items", "(", ")", ":", "if", "str", "(", "key", ")", "!=", "str", "(", "instance", ".", "pk", ")", "and", "element_id", "==", "value", ":", "msg", "=", "_", "(", "\"The element ID '{}' is not unique for this page.\"", ")", "raise", "ValidationError", "(", "msg", ".", "format", "(", "element_id", ")", ")" ]
Check for uniqueness of the given element_id for the current page. Return None if instance is not yet associated with a page.
[ "Check", "for", "uniqueness", "of", "the", "given", "element_id", "for", "the", "current", "page", ".", "Return", "None", "if", "instance", "is", "not", "yet", "associated", "with", "a", "page", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/generic/mixins.py#L20-L34
5,523
jrief/djangocms-cascade
cmsplugin_cascade/utils.py
rectify_partial_form_field
def rectify_partial_form_field(base_field, partial_form_fields): """ In base_field reset the attributes label and help_text, since they are overriden by the partial field. Additionally, from the list, or list of lists of partial_form_fields append the bound validator methods to the given base field. """ base_field.label = '' base_field.help_text = '' for fieldset in partial_form_fields: if not isinstance(fieldset, (list, tuple)): fieldset = [fieldset] for field in fieldset: base_field.validators.append(field.run_validators)
python
def rectify_partial_form_field(base_field, partial_form_fields): """ In base_field reset the attributes label and help_text, since they are overriden by the partial field. Additionally, from the list, or list of lists of partial_form_fields append the bound validator methods to the given base field. """ base_field.label = '' base_field.help_text = '' for fieldset in partial_form_fields: if not isinstance(fieldset, (list, tuple)): fieldset = [fieldset] for field in fieldset: base_field.validators.append(field.run_validators)
[ "def", "rectify_partial_form_field", "(", "base_field", ",", "partial_form_fields", ")", ":", "base_field", ".", "label", "=", "''", "base_field", ".", "help_text", "=", "''", "for", "fieldset", "in", "partial_form_fields", ":", "if", "not", "isinstance", "(", "fieldset", ",", "(", "list", ",", "tuple", ")", ")", ":", "fieldset", "=", "[", "fieldset", "]", "for", "field", "in", "fieldset", ":", "base_field", ".", "validators", ".", "append", "(", "field", ".", "run_validators", ")" ]
In base_field, reset the attributes label and help_text, since they are overridden by the partial field. Additionally, from the list, or list of lists of partial_form_fields append the bound validator methods to the given base field.
[ "In", "base_field", "reset", "the", "attributes", "label", "and", "help_text", "since", "they", "are", "overriden", "by", "the", "partial", "field", ".", "Additionally", "from", "the", "list", "or", "list", "of", "lists", "of", "partial_form_fields", "append", "the", "bound", "validator", "methods", "to", "the", "given", "base", "field", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/utils.py#L16-L28
5,524
jrief/djangocms-cascade
cmsplugin_cascade/utils.py
validate_link
def validate_link(link_data): """ Check if the given model exists, otherwise raise a Validation error """ from django.apps import apps try: Model = apps.get_model(*link_data['model'].split('.')) Model.objects.get(pk=link_data['pk']) except Model.DoesNotExist: raise ValidationError(_("Unable to link onto '{0}'.").format(Model.__name__))
python
def validate_link(link_data): """ Check if the given model exists, otherwise raise a Validation error """ from django.apps import apps try: Model = apps.get_model(*link_data['model'].split('.')) Model.objects.get(pk=link_data['pk']) except Model.DoesNotExist: raise ValidationError(_("Unable to link onto '{0}'.").format(Model.__name__))
[ "def", "validate_link", "(", "link_data", ")", ":", "from", "django", ".", "apps", "import", "apps", "try", ":", "Model", "=", "apps", ".", "get_model", "(", "*", "link_data", "[", "'model'", "]", ".", "split", "(", "'.'", ")", ")", "Model", ".", "objects", ".", "get", "(", "pk", "=", "link_data", "[", "'pk'", "]", ")", "except", "Model", ".", "DoesNotExist", ":", "raise", "ValidationError", "(", "_", "(", "\"Unable to link onto '{0}'.\"", ")", ".", "format", "(", "Model", ".", "__name__", ")", ")" ]
Check if the given model exists, otherwise raise a Validation error
[ "Check", "if", "the", "given", "model", "exists", "otherwise", "raise", "a", "Validation", "error" ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/utils.py#L30-L40
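validate_link above expects link_data to carry a dotted "app_label.ModelName" string plus a primary key, and resolves it through Django's apps registry and ORM. The framework-free sketch below uses a hypothetical in-memory registry dict in place of the ORM, purely to show the expected shape of link_data and the dotted-path split.

registry = {('cms', 'page'): {1, 2, 3}}  # hypothetical: (app_label, model) -> known pks

def validate_link(link_data):
    app_label, model_name = link_data['model'].split('.')
    known_pks = registry.get((app_label, model_name.lower()), set())
    if link_data['pk'] not in known_pks:
        raise ValueError("Unable to link onto '{0}'.".format(model_name))

validate_link({'model': 'cms.Page', 'pk': 2})     # passes silently
# validate_link({'model': 'cms.Page', 'pk': 99})  # would raise ValueError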
5,525
jrief/djangocms-cascade
cmsplugin_cascade/utils.py
parse_responsive_length
def parse_responsive_length(responsive_length): """ Takes a string containing a length definition in pixels or percent and parses it to obtain a computational length. It returns a tuple where the first element is the length in pixels and the second element is its length in percent divided by 100. Note that one of both returned elements is None. """ responsive_length = responsive_length.strip() if responsive_length.endswith('px'): return (int(responsive_length.rstrip('px')), None) elif responsive_length.endswith('%'): return (None, float(responsive_length.rstrip('%')) / 100) return (None, None)
python
def parse_responsive_length(responsive_length): """ Takes a string containing a length definition in pixels or percent and parses it to obtain a computational length. It returns a tuple where the first element is the length in pixels and the second element is its length in percent divided by 100. Note that one of both returned elements is None. """ responsive_length = responsive_length.strip() if responsive_length.endswith('px'): return (int(responsive_length.rstrip('px')), None) elif responsive_length.endswith('%'): return (None, float(responsive_length.rstrip('%')) / 100) return (None, None)
[ "def", "parse_responsive_length", "(", "responsive_length", ")", ":", "responsive_length", "=", "responsive_length", ".", "strip", "(", ")", "if", "responsive_length", ".", "endswith", "(", "'px'", ")", ":", "return", "(", "int", "(", "responsive_length", ".", "rstrip", "(", "'px'", ")", ")", ",", "None", ")", "elif", "responsive_length", ".", "endswith", "(", "'%'", ")", ":", "return", "(", "None", ",", "float", "(", "responsive_length", ".", "rstrip", "(", "'%'", ")", ")", "/", "100", ")", "return", "(", "None", ",", "None", ")" ]
Takes a string containing a length definition in pixels or percent and parses it to obtain a computational length. It returns a tuple where the first element is the length in pixels and the second element is its length in percent divided by 100. Note that one of both returned elements is None.
[ "Takes", "a", "string", "containing", "a", "length", "definition", "in", "pixels", "or", "percent", "and", "parses", "it", "to", "obtain", "a", "computational", "length", ".", "It", "returns", "a", "tuple", "where", "the", "first", "element", "is", "the", "length", "in", "pixels", "and", "the", "second", "element", "is", "its", "length", "in", "percent", "divided", "by", "100", ".", "Note", "that", "one", "of", "both", "returned", "elements", "is", "None", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/utils.py#L71-L83
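The contract of parse_responsive_length above can be pinned down with three cases. This usage sketch assumes djangocms-cascade is importable in the current environment; otherwise the asserts simply document the expected return values.

from cmsplugin_cascade.utils import parse_responsive_length

assert parse_responsive_length('320px') == (320, None)   # pixel length -> (int, None)
assert parse_responsive_length(' 75% ') == (None, 0.75)  # percent -> (None, fraction of 1)
assert parse_responsive_length('auto') == (None, None)   # anything else -> (None, None)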
5,526
jrief/djangocms-cascade
cmsplugin_cascade/mixins.py
CascadePluginMixin.get_css_classes
def get_css_classes(cls, instance): """ Returns a list of CSS classes to be added as class="..." to the current HTML tag. """ css_classes = [] if hasattr(cls, 'default_css_class'): css_classes.append(cls.default_css_class) for attr in getattr(cls, 'default_css_attributes', []): css_class = instance.glossary.get(attr) if isinstance(css_class, six.string_types): css_classes.append(css_class) elif isinstance(css_class, list): css_classes.extend(css_class) return css_classes
python
def get_css_classes(cls, instance): """ Returns a list of CSS classes to be added as class="..." to the current HTML tag. """ css_classes = [] if hasattr(cls, 'default_css_class'): css_classes.append(cls.default_css_class) for attr in getattr(cls, 'default_css_attributes', []): css_class = instance.glossary.get(attr) if isinstance(css_class, six.string_types): css_classes.append(css_class) elif isinstance(css_class, list): css_classes.extend(css_class) return css_classes
[ "def", "get_css_classes", "(", "cls", ",", "instance", ")", ":", "css_classes", "=", "[", "]", "if", "hasattr", "(", "cls", ",", "'default_css_class'", ")", ":", "css_classes", ".", "append", "(", "cls", ".", "default_css_class", ")", "for", "attr", "in", "getattr", "(", "cls", ",", "'default_css_attributes'", ",", "[", "]", ")", ":", "css_class", "=", "instance", ".", "glossary", ".", "get", "(", "attr", ")", "if", "isinstance", "(", "css_class", ",", "six", ".", "string_types", ")", ":", "css_classes", ".", "append", "(", "css_class", ")", "elif", "isinstance", "(", "css_class", ",", "list", ")", ":", "css_classes", ".", "extend", "(", "css_class", ")", "return", "css_classes" ]
Returns a list of CSS classes to be added as class="..." to the current HTML tag.
[ "Returns", "a", "list", "of", "CSS", "classes", "to", "be", "added", "as", "class", "=", "...", "to", "the", "current", "HTML", "tag", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/mixins.py#L27-L40
5,527
jrief/djangocms-cascade
cmsplugin_cascade/mixins.py
CascadePluginMixin.get_inline_styles
def get_inline_styles(cls, instance): """ Returns a dictionary of CSS attributes to be added as style="..." to the current HTML tag. """ inline_styles = getattr(cls, 'default_inline_styles', {}) css_style = instance.glossary.get('inline_styles') if css_style: inline_styles.update(css_style) return inline_styles
python
def get_inline_styles(cls, instance): """ Returns a dictionary of CSS attributes to be added as style="..." to the current HTML tag. """ inline_styles = getattr(cls, 'default_inline_styles', {}) css_style = instance.glossary.get('inline_styles') if css_style: inline_styles.update(css_style) return inline_styles
[ "def", "get_inline_styles", "(", "cls", ",", "instance", ")", ":", "inline_styles", "=", "getattr", "(", "cls", ",", "'default_inline_styles'", ",", "{", "}", ")", "css_style", "=", "instance", ".", "glossary", ".", "get", "(", "'inline_styles'", ")", "if", "css_style", ":", "inline_styles", ".", "update", "(", "css_style", ")", "return", "inline_styles" ]
Returns a dictionary of CSS attributes to be added as style="..." to the current HTML tag.
[ "Returns", "a", "dictionary", "of", "CSS", "attributes", "to", "be", "added", "as", "style", "=", "...", "to", "the", "current", "HTML", "tag", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/mixins.py#L43-L51
5,528
jrief/djangocms-cascade
cmsplugin_cascade/mixins.py
CascadePluginMixin.get_html_tag_attributes
def get_html_tag_attributes(cls, instance): """ Returns a dictionary of attributes, which shall be added to the current HTML tag. This method normally is called by the models's property method ``html_tag_ attributes``, which enriches the HTML tag with those attributes converted to a list as ``attr1="val1" attr2="val2" ...``. """ attributes = getattr(cls, 'html_tag_attributes', {}) return dict((attr, instance.glossary.get(key, '')) for key, attr in attributes.items())
python
def get_html_tag_attributes(cls, instance): """ Returns a dictionary of attributes, which shall be added to the current HTML tag. This method normally is called by the models's property method ``html_tag_ attributes``, which enriches the HTML tag with those attributes converted to a list as ``attr1="val1" attr2="val2" ...``. """ attributes = getattr(cls, 'html_tag_attributes', {}) return dict((attr, instance.glossary.get(key, '')) for key, attr in attributes.items())
[ "def", "get_html_tag_attributes", "(", "cls", ",", "instance", ")", ":", "attributes", "=", "getattr", "(", "cls", ",", "'html_tag_attributes'", ",", "{", "}", ")", "return", "dict", "(", "(", "attr", ",", "instance", ".", "glossary", ".", "get", "(", "key", ",", "''", ")", ")", "for", "key", ",", "attr", "in", "attributes", ".", "items", "(", ")", ")" ]
Returns a dictionary of attributes, which shall be added to the current HTML tag. This method is normally called by the model's property method ``html_tag_attributes``, which enriches the HTML tag with those attributes converted to a list as ``attr1="val1" attr2="val2" ...``.
[ "Returns", "a", "dictionary", "of", "attributes", "which", "shall", "be", "added", "to", "the", "current", "HTML", "tag", ".", "This", "method", "normally", "is", "called", "by", "the", "models", "s", "property", "method", "html_tag_", "attributes", "which", "enriches", "the", "HTML", "tag", "with", "those", "attributes", "converted", "to", "a", "list", "as", "attr1", "=", "val1", "attr2", "=", "val2", "...", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/mixins.py#L54-L62
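get_html_tag_attributes above maps glossary keys to HTML attribute names via the plugin's html_tag_attributes dict. A standalone sketch with made-up keys shows the translation and how the result could be rendered into a tag:

# keys and values are illustrative only
html_tag_attributes = {'image_title': 'title', 'alt_tag': 'alt'}
glossary = {'image_title': 'Our office', 'alt_tag': 'office.jpg'}

attributes = {attr: glossary.get(key, '') for key, attr in html_tag_attributes.items()}
print(' '.join('{}="{}"'.format(attr, value) for attr, value in attributes.items()))
# title="Our office" alt="office.jpg"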
5,529
jrief/djangocms-cascade
cmsplugin_cascade/widgets.py
CascadingSizeWidgetMixin.compile_validation_pattern
def compile_validation_pattern(self, units=None): """ Assure that passed in units are valid size units, or if missing, use all possible units. Return a tuple with a regular expression to be used for validating and an error message in case this validation failed. """ if units is None: units = list(self.POSSIBLE_UNITS) else: for u in units: if u not in self.POSSIBLE_UNITS: raise ValidationError('{} is not a valid unit for a size field'.format(u)) regex = re.compile(r'^(-?\d+)({})$'.format('|'.join(units))) endings = (' %s ' % ugettext("or")).join("'%s'" % u.replace('%', '%%') for u in units) params = {'label': '%(label)s', 'value': '%(value)s', 'field': '%(field)s', 'endings': endings} return regex, self.invalid_message % params
python
def compile_validation_pattern(self, units=None): """ Assure that passed in units are valid size units, or if missing, use all possible units. Return a tuple with a regular expression to be used for validating and an error message in case this validation failed. """ if units is None: units = list(self.POSSIBLE_UNITS) else: for u in units: if u not in self.POSSIBLE_UNITS: raise ValidationError('{} is not a valid unit for a size field'.format(u)) regex = re.compile(r'^(-?\d+)({})$'.format('|'.join(units))) endings = (' %s ' % ugettext("or")).join("'%s'" % u.replace('%', '%%') for u in units) params = {'label': '%(label)s', 'value': '%(value)s', 'field': '%(field)s', 'endings': endings} return regex, self.invalid_message % params
[ "def", "compile_validation_pattern", "(", "self", ",", "units", "=", "None", ")", ":", "if", "units", "is", "None", ":", "units", "=", "list", "(", "self", ".", "POSSIBLE_UNITS", ")", "else", ":", "for", "u", "in", "units", ":", "if", "u", "not", "in", "self", ".", "POSSIBLE_UNITS", ":", "raise", "ValidationError", "(", "'{} is not a valid unit for a size field'", ".", "format", "(", "u", ")", ")", "regex", "=", "re", ".", "compile", "(", "r'^(-?\\d+)({})$'", ".", "format", "(", "'|'", ".", "join", "(", "units", ")", ")", ")", "endings", "=", "(", "' %s '", "%", "ugettext", "(", "\"or\"", ")", ")", ".", "join", "(", "\"'%s'\"", "%", "u", ".", "replace", "(", "'%'", ",", "'%%'", ")", "for", "u", "in", "units", ")", "params", "=", "{", "'label'", ":", "'%(label)s'", ",", "'value'", ":", "'%(value)s'", ",", "'field'", ":", "'%(field)s'", ",", "'endings'", ":", "endings", "}", "return", "regex", ",", "self", ".", "invalid_message", "%", "params" ]
Assure that passed in units are valid size units, or if missing, use all possible units. Return a tuple with a regular expression to be used for validating and an error message in case this validation failed.
[ "Assure", "that", "passed", "in", "units", "are", "valid", "size", "units", "or", "if", "missing", "use", "all", "possible", "units", ".", "Return", "a", "tuple", "with", "a", "regular", "expression", "to", "be", "used", "for", "validating", "and", "an", "error", "message", "in", "case", "this", "validation", "failed", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/widgets.py#L108-L123
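For compile_validation_pattern above, the generated expression for the units 'px' and '%' reduces to an anchored regex with an optional sign, an integer, and one of the allowed unit suffixes. The snippet below rebuilds that regex directly to show what it accepts and rejects; the unit list is an example, not the widget's actual POSSIBLE_UNITS.

import re

regex = re.compile(r'^(-?\d+)({})$'.format('|'.join(['px', '%'])))

print(bool(regex.match('-20px')))  # True  - negative integers are fine
print(bool(regex.match('100%')))   # True
print(bool(regex.match('1.5px')))  # False - only whole numbers match \d+
print(bool(regex.match('20 px')))  # False - no whitespace before the unit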
5,530
jrief/djangocms-cascade
cmsplugin_cascade/link/forms.py
LinkForm.unset_required_for
def unset_required_for(cls, sharable_fields): """ Fields borrowed by `SharedGlossaryAdmin` to build its temporary change form, only are required if they are declared in `sharable_fields`. Otherwise just deactivate them. """ if 'link_content' in cls.base_fields and 'link_content' not in sharable_fields: cls.base_fields['link_content'].required = False if 'link_type' in cls.base_fields and 'link' not in sharable_fields: cls.base_fields['link_type'].required = False
python
def unset_required_for(cls, sharable_fields): """ Fields borrowed by `SharedGlossaryAdmin` to build its temporary change form, only are required if they are declared in `sharable_fields`. Otherwise just deactivate them. """ if 'link_content' in cls.base_fields and 'link_content' not in sharable_fields: cls.base_fields['link_content'].required = False if 'link_type' in cls.base_fields and 'link' not in sharable_fields: cls.base_fields['link_type'].required = False
[ "def", "unset_required_for", "(", "cls", ",", "sharable_fields", ")", ":", "if", "'link_content'", "in", "cls", ".", "base_fields", "and", "'link_content'", "not", "in", "sharable_fields", ":", "cls", ".", "base_fields", "[", "'link_content'", "]", ".", "required", "=", "False", "if", "'link_type'", "in", "cls", ".", "base_fields", "and", "'link'", "not", "in", "sharable_fields", ":", "cls", ".", "base_fields", "[", "'link_type'", "]", ".", "required", "=", "False" ]
Fields borrowed by `SharedGlossaryAdmin` to build its temporary change form are only required if they are declared in `sharable_fields`. Otherwise just deactivate them.
[ "Fields", "borrowed", "by", "SharedGlossaryAdmin", "to", "build", "its", "temporary", "change", "form", "only", "are", "required", "if", "they", "are", "declared", "in", "sharable_fields", ".", "Otherwise", "just", "deactivate", "them", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/link/forms.py#L260-L268
5,531
jrief/djangocms-cascade
cmsplugin_cascade/models.py
CascadePage.assure_relation
def assure_relation(cls, cms_page): """ Assure that we have a foreign key relation, pointing from CascadePage onto CMSPage. """ try: cms_page.cascadepage except cls.DoesNotExist: cls.objects.create(extended_object=cms_page)
python
def assure_relation(cls, cms_page): """ Assure that we have a foreign key relation, pointing from CascadePage onto CMSPage. """ try: cms_page.cascadepage except cls.DoesNotExist: cls.objects.create(extended_object=cms_page)
[ "def", "assure_relation", "(", "cls", ",", "cms_page", ")", ":", "try", ":", "cms_page", ".", "cascadepage", "except", "cls", ".", "DoesNotExist", ":", "cls", ".", "objects", ".", "create", "(", "extended_object", "=", "cms_page", ")" ]
Assure that we have a foreign key relation, pointing from CascadePage onto CMSPage.
[ "Assure", "that", "we", "have", "a", "foreign", "key", "relation", "pointing", "from", "CascadePage", "onto", "CMSPage", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/models.py#L371-L378
5,532
jrief/djangocms-cascade
cmsplugin_cascade/bootstrap3/utils.py
compute_media_queries
def compute_media_queries(element): """ For e given Cascade element, compute the current media queries for each breakpoint, even for nested containers, rows and columns. """ parent_glossary = element.get_parent_glossary() # compute the max width and the required media queries for each chosen breakpoint element.glossary['container_max_widths'] = max_widths = {} element.glossary['media_queries'] = media_queries = {} breakpoints = element.glossary.get('breakpoints', parent_glossary.get('breakpoints', [])) last_index = len(breakpoints) - 1 fluid = element.glossary.get('fluid') for index, bp in enumerate(breakpoints): try: key = 'container_fluid_max_widths' if fluid else 'container_max_widths' max_widths[bp] = parent_glossary[key][bp] except KeyError: max_widths[bp] = BS3_BREAKPOINTS[bp][4 if fluid else 3] if last_index > 0: if index == 0: next_bp = breakpoints[1] media_queries[bp] = ['(max-width: {0}px)'.format(BS3_BREAKPOINTS[next_bp][0])] elif index == last_index: media_queries[bp] = ['(min-width: {0}px)'.format(BS3_BREAKPOINTS[bp][0])] else: next_bp = breakpoints[index + 1] media_queries[bp] = ['(min-width: {0}px)'.format(BS3_BREAKPOINTS[bp][0]), '(max-width: {0}px)'.format(BS3_BREAKPOINTS[next_bp][0])]
python
def compute_media_queries(element): """ For e given Cascade element, compute the current media queries for each breakpoint, even for nested containers, rows and columns. """ parent_glossary = element.get_parent_glossary() # compute the max width and the required media queries for each chosen breakpoint element.glossary['container_max_widths'] = max_widths = {} element.glossary['media_queries'] = media_queries = {} breakpoints = element.glossary.get('breakpoints', parent_glossary.get('breakpoints', [])) last_index = len(breakpoints) - 1 fluid = element.glossary.get('fluid') for index, bp in enumerate(breakpoints): try: key = 'container_fluid_max_widths' if fluid else 'container_max_widths' max_widths[bp] = parent_glossary[key][bp] except KeyError: max_widths[bp] = BS3_BREAKPOINTS[bp][4 if fluid else 3] if last_index > 0: if index == 0: next_bp = breakpoints[1] media_queries[bp] = ['(max-width: {0}px)'.format(BS3_BREAKPOINTS[next_bp][0])] elif index == last_index: media_queries[bp] = ['(min-width: {0}px)'.format(BS3_BREAKPOINTS[bp][0])] else: next_bp = breakpoints[index + 1] media_queries[bp] = ['(min-width: {0}px)'.format(BS3_BREAKPOINTS[bp][0]), '(max-width: {0}px)'.format(BS3_BREAKPOINTS[next_bp][0])]
[ "def", "compute_media_queries", "(", "element", ")", ":", "parent_glossary", "=", "element", ".", "get_parent_glossary", "(", ")", "# compute the max width and the required media queries for each chosen breakpoint", "element", ".", "glossary", "[", "'container_max_widths'", "]", "=", "max_widths", "=", "{", "}", "element", ".", "glossary", "[", "'media_queries'", "]", "=", "media_queries", "=", "{", "}", "breakpoints", "=", "element", ".", "glossary", ".", "get", "(", "'breakpoints'", ",", "parent_glossary", ".", "get", "(", "'breakpoints'", ",", "[", "]", ")", ")", "last_index", "=", "len", "(", "breakpoints", ")", "-", "1", "fluid", "=", "element", ".", "glossary", ".", "get", "(", "'fluid'", ")", "for", "index", ",", "bp", "in", "enumerate", "(", "breakpoints", ")", ":", "try", ":", "key", "=", "'container_fluid_max_widths'", "if", "fluid", "else", "'container_max_widths'", "max_widths", "[", "bp", "]", "=", "parent_glossary", "[", "key", "]", "[", "bp", "]", "except", "KeyError", ":", "max_widths", "[", "bp", "]", "=", "BS3_BREAKPOINTS", "[", "bp", "]", "[", "4", "if", "fluid", "else", "3", "]", "if", "last_index", ">", "0", ":", "if", "index", "==", "0", ":", "next_bp", "=", "breakpoints", "[", "1", "]", "media_queries", "[", "bp", "]", "=", "[", "'(max-width: {0}px)'", ".", "format", "(", "BS3_BREAKPOINTS", "[", "next_bp", "]", "[", "0", "]", ")", "]", "elif", "index", "==", "last_index", ":", "media_queries", "[", "bp", "]", "=", "[", "'(min-width: {0}px)'", ".", "format", "(", "BS3_BREAKPOINTS", "[", "bp", "]", "[", "0", "]", ")", "]", "else", ":", "next_bp", "=", "breakpoints", "[", "index", "+", "1", "]", "media_queries", "[", "bp", "]", "=", "[", "'(min-width: {0}px)'", ".", "format", "(", "BS3_BREAKPOINTS", "[", "bp", "]", "[", "0", "]", ")", ",", "'(max-width: {0}px)'", ".", "format", "(", "BS3_BREAKPOINTS", "[", "next_bp", "]", "[", "0", "]", ")", "]" ]
For a given Cascade element, compute the current media queries for each breakpoint, even for nested containers, rows and columns.
[ "For", "e", "given", "Cascade", "element", "compute", "the", "current", "media", "queries", "for", "each", "breakpoint", "even", "for", "nested", "containers", "rows", "and", "columns", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/bootstrap3/utils.py#L63-L90
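The branching in compute_media_queries above gives the first breakpoint a max-width query only, the last one a min-width query only, and every breakpoint in between both. The standalone sketch below replays that logic; the pixel values roughly follow Bootstrap-3 conventions and are assumptions here, not the actual BS3_BREAKPOINTS entries.

breakpoint_px = {'xs': 480, 'sm': 768, 'md': 992, 'lg': 1200}  # assumed values
breakpoints = ['xs', 'sm', 'md', 'lg']
last_index = len(breakpoints) - 1

media_queries = {}
for index, bp in enumerate(breakpoints):
    if index == 0:
        next_bp = breakpoints[1]
        media_queries[bp] = ['(max-width: {0}px)'.format(breakpoint_px[next_bp])]
    elif index == last_index:
        media_queries[bp] = ['(min-width: {0}px)'.format(breakpoint_px[bp])]
    else:
        next_bp = breakpoints[index + 1]
        media_queries[bp] = ['(min-width: {0}px)'.format(breakpoint_px[bp]),
                             '(max-width: {0}px)'.format(breakpoint_px[next_bp])]

print(media_queries['sm'])  # ['(min-width: 768px)', '(max-width: 992px)']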
5,533
jrief/djangocms-cascade
cmsplugin_cascade/fields.py
GlossaryField.get_element_ids
def get_element_ids(self, prefix_id): """ Returns a single or a list of element ids, one for each input widget of this field """ if isinstance(self.widget, widgets.MultiWidget): ids = ['{0}_{1}_{2}'.format(prefix_id, self.name, field_name) for field_name in self.widget] elif isinstance(self.widget, (widgets.SelectMultiple, widgets.RadioSelect)): ids = ['{0}_{1}_{2}'.format(prefix_id, self.name, k) for k in range(len(self.widget.choices))] else: ids = ['{0}_{1}'.format(prefix_id, self.name)] return ids
python
def get_element_ids(self, prefix_id): """ Returns a single or a list of element ids, one for each input widget of this field """ if isinstance(self.widget, widgets.MultiWidget): ids = ['{0}_{1}_{2}'.format(prefix_id, self.name, field_name) for field_name in self.widget] elif isinstance(self.widget, (widgets.SelectMultiple, widgets.RadioSelect)): ids = ['{0}_{1}_{2}'.format(prefix_id, self.name, k) for k in range(len(self.widget.choices))] else: ids = ['{0}_{1}'.format(prefix_id, self.name)] return ids
[ "def", "get_element_ids", "(", "self", ",", "prefix_id", ")", ":", "if", "isinstance", "(", "self", ".", "widget", ",", "widgets", ".", "MultiWidget", ")", ":", "ids", "=", "[", "'{0}_{1}_{2}'", ".", "format", "(", "prefix_id", ",", "self", ".", "name", ",", "field_name", ")", "for", "field_name", "in", "self", ".", "widget", "]", "elif", "isinstance", "(", "self", ".", "widget", ",", "(", "widgets", ".", "SelectMultiple", ",", "widgets", ".", "RadioSelect", ")", ")", ":", "ids", "=", "[", "'{0}_{1}_{2}'", ".", "format", "(", "prefix_id", ",", "self", ".", "name", ",", "k", ")", "for", "k", "in", "range", "(", "len", "(", "self", ".", "widget", ".", "choices", ")", ")", "]", "else", ":", "ids", "=", "[", "'{0}_{1}'", ".", "format", "(", "prefix_id", ",", "self", ".", "name", ")", "]", "return", "ids" ]
Returns a single or a list of element ids, one for each input widget of this field
[ "Returns", "a", "single", "or", "a", "list", "of", "element", "ids", "one", "for", "each", "input", "widget", "of", "this", "field" ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/fields.py#L50-L60
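get_element_ids above composes one DOM id per input widget from the admin form's prefix, the field name, and (for multi-input widgets) a running index. A tiny standalone illustration with made-up names:

prefix_id, name = 'id_glossary', 'button_type'  # illustrative values
choices = ['primary', 'secondary', 'link']      # e.g. a RadioSelect with three options
ids = ['{0}_{1}_{2}'.format(prefix_id, name, k) for k in range(len(choices))]
print(ids)  # ['id_glossary_button_type_0', 'id_glossary_button_type_1', 'id_glossary_button_type_2']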
5,534
jrief/djangocms-cascade
cmsplugin_cascade/segmentation/mixins.py
EmulateUserModelMixin.get_context_override
def get_context_override(self, request): """ Override the request object with an emulated user. """ context_override = super(EmulateUserModelMixin, self).get_context_override(request) try: if request.user.is_staff: user = self.UserModel.objects.get(pk=request.session['emulate_user_id']) context_override.update(user=user) except (self.UserModel.DoesNotExist, KeyError): pass return context_override
python
def get_context_override(self, request): """ Override the request object with an emulated user. """ context_override = super(EmulateUserModelMixin, self).get_context_override(request) try: if request.user.is_staff: user = self.UserModel.objects.get(pk=request.session['emulate_user_id']) context_override.update(user=user) except (self.UserModel.DoesNotExist, KeyError): pass return context_override
[ "def", "get_context_override", "(", "self", ",", "request", ")", ":", "context_override", "=", "super", "(", "EmulateUserModelMixin", ",", "self", ")", ".", "get_context_override", "(", "request", ")", "try", ":", "if", "request", ".", "user", ".", "is_staff", ":", "user", "=", "self", ".", "UserModel", ".", "objects", ".", "get", "(", "pk", "=", "request", ".", "session", "[", "'emulate_user_id'", "]", ")", "context_override", ".", "update", "(", "user", "=", "user", ")", "except", "(", "self", ".", "UserModel", ".", "DoesNotExist", ",", "KeyError", ")", ":", "pass", "return", "context_override" ]
Override the request object with an emulated user.
[ "Override", "the", "request", "object", "with", "an", "emulated", "user", "." ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/segmentation/mixins.py#L48-L59
5,535
jrief/djangocms-cascade
cmsplugin_cascade/segmentation/mixins.py
EmulateUserAdminMixin.emulate_users
def emulate_users(self, request): """ The list view """ def display_as_link(self, obj): try: identifier = getattr(user_model_admin, list_display_link)(obj) except AttributeError: identifier = admin.utils.lookup_field(list_display_link, obj, model_admin=self)[2] emulate_user_id = request.session.get('emulate_user_id') if emulate_user_id == obj.id: return format_html('<strong>{}</strong>', identifier) fmtargs = { 'href': reverse('admin:emulate-user', kwargs={'user_id': obj.id}), 'identifier': identifier, } return format_html('<a href="{href}" class="emulate-user">{identifier}</a>', **fmtargs) opts = self.UserModel._meta app_label = opts.app_label user_model_admin = self.admin_site._registry[self.UserModel] request._lookup_model = self.UserModel list_display_links = user_model_admin.get_list_display_links(request, user_model_admin.list_display) # replace first entry in list_display_links by customized method display_as_link list_display_link = list_display_links[0] try: list_display = list(user_model_admin.segmentation_list_display) except AttributeError: list_display = list(user_model_admin.list_display) list_display.remove(list_display_link) list_display.insert(0, 'display_as_link') display_as_link.allow_tags = True # TODO: presumably not required anymore since Django-1.9 try: display_as_link.short_description = user_model_admin.identifier.short_description except AttributeError: display_as_link.short_description = admin.utils.label_for_field(list_display_link, self.UserModel) self.display_as_link = six.create_bound_method(display_as_link, self) ChangeList = self.get_changelist(request) cl = ChangeList(request, self.UserModel, list_display, (None,), # disable list_display_links in ChangeList, instead override that field user_model_admin.list_filter, user_model_admin.date_hierarchy, user_model_admin.search_fields, user_model_admin.list_select_related, user_model_admin.list_per_page, user_model_admin.list_max_show_all, (), # disable list_editable self) cl.formset = None selection_note_all = ungettext('%(total_count)s selected', 'All %(total_count)s selected', cl.result_count) context = { 'module_name': force_text(opts.verbose_name_plural), 'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)}, 'selection_note_all': selection_note_all % {'total_count': cl.result_count}, 'title': _("Select %(user_model)s to emulate") % {'user_model': opts.verbose_name}, 'is_popup': cl.is_popup, 'cl': cl, 'media': self.media, 'has_add_permission': False, 'opts': cl.opts, 'app_label': app_label, 'actions_on_top': self.actions_on_top, 'actions_on_bottom': self.actions_on_bottom, 'actions_selection_counter': self.actions_selection_counter, 'preserved_filters': self.get_preserved_filters(request), } return TemplateResponse(request, self.change_list_template or [ 'admin/%s/%s/change_list.html' % (app_label, opts.model_name), 'admin/%s/change_list.html' % app_label, 'admin/change_list.html' ], context)
python
def emulate_users(self, request): """ The list view """ def display_as_link(self, obj): try: identifier = getattr(user_model_admin, list_display_link)(obj) except AttributeError: identifier = admin.utils.lookup_field(list_display_link, obj, model_admin=self)[2] emulate_user_id = request.session.get('emulate_user_id') if emulate_user_id == obj.id: return format_html('<strong>{}</strong>', identifier) fmtargs = { 'href': reverse('admin:emulate-user', kwargs={'user_id': obj.id}), 'identifier': identifier, } return format_html('<a href="{href}" class="emulate-user">{identifier}</a>', **fmtargs) opts = self.UserModel._meta app_label = opts.app_label user_model_admin = self.admin_site._registry[self.UserModel] request._lookup_model = self.UserModel list_display_links = user_model_admin.get_list_display_links(request, user_model_admin.list_display) # replace first entry in list_display_links by customized method display_as_link list_display_link = list_display_links[0] try: list_display = list(user_model_admin.segmentation_list_display) except AttributeError: list_display = list(user_model_admin.list_display) list_display.remove(list_display_link) list_display.insert(0, 'display_as_link') display_as_link.allow_tags = True # TODO: presumably not required anymore since Django-1.9 try: display_as_link.short_description = user_model_admin.identifier.short_description except AttributeError: display_as_link.short_description = admin.utils.label_for_field(list_display_link, self.UserModel) self.display_as_link = six.create_bound_method(display_as_link, self) ChangeList = self.get_changelist(request) cl = ChangeList(request, self.UserModel, list_display, (None,), # disable list_display_links in ChangeList, instead override that field user_model_admin.list_filter, user_model_admin.date_hierarchy, user_model_admin.search_fields, user_model_admin.list_select_related, user_model_admin.list_per_page, user_model_admin.list_max_show_all, (), # disable list_editable self) cl.formset = None selection_note_all = ungettext('%(total_count)s selected', 'All %(total_count)s selected', cl.result_count) context = { 'module_name': force_text(opts.verbose_name_plural), 'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)}, 'selection_note_all': selection_note_all % {'total_count': cl.result_count}, 'title': _("Select %(user_model)s to emulate") % {'user_model': opts.verbose_name}, 'is_popup': cl.is_popup, 'cl': cl, 'media': self.media, 'has_add_permission': False, 'opts': cl.opts, 'app_label': app_label, 'actions_on_top': self.actions_on_top, 'actions_on_bottom': self.actions_on_bottom, 'actions_selection_counter': self.actions_selection_counter, 'preserved_filters': self.get_preserved_filters(request), } return TemplateResponse(request, self.change_list_template or [ 'admin/%s/%s/change_list.html' % (app_label, opts.model_name), 'admin/%s/change_list.html' % app_label, 'admin/change_list.html' ], context)
[ "def", "emulate_users", "(", "self", ",", "request", ")", ":", "def", "display_as_link", "(", "self", ",", "obj", ")", ":", "try", ":", "identifier", "=", "getattr", "(", "user_model_admin", ",", "list_display_link", ")", "(", "obj", ")", "except", "AttributeError", ":", "identifier", "=", "admin", ".", "utils", ".", "lookup_field", "(", "list_display_link", ",", "obj", ",", "model_admin", "=", "self", ")", "[", "2", "]", "emulate_user_id", "=", "request", ".", "session", ".", "get", "(", "'emulate_user_id'", ")", "if", "emulate_user_id", "==", "obj", ".", "id", ":", "return", "format_html", "(", "'<strong>{}</strong>'", ",", "identifier", ")", "fmtargs", "=", "{", "'href'", ":", "reverse", "(", "'admin:emulate-user'", ",", "kwargs", "=", "{", "'user_id'", ":", "obj", ".", "id", "}", ")", ",", "'identifier'", ":", "identifier", ",", "}", "return", "format_html", "(", "'<a href=\"{href}\" class=\"emulate-user\">{identifier}</a>'", ",", "*", "*", "fmtargs", ")", "opts", "=", "self", ".", "UserModel", ".", "_meta", "app_label", "=", "opts", ".", "app_label", "user_model_admin", "=", "self", ".", "admin_site", ".", "_registry", "[", "self", ".", "UserModel", "]", "request", ".", "_lookup_model", "=", "self", ".", "UserModel", "list_display_links", "=", "user_model_admin", ".", "get_list_display_links", "(", "request", ",", "user_model_admin", ".", "list_display", ")", "# replace first entry in list_display_links by customized method display_as_link", "list_display_link", "=", "list_display_links", "[", "0", "]", "try", ":", "list_display", "=", "list", "(", "user_model_admin", ".", "segmentation_list_display", ")", "except", "AttributeError", ":", "list_display", "=", "list", "(", "user_model_admin", ".", "list_display", ")", "list_display", ".", "remove", "(", "list_display_link", ")", "list_display", ".", "insert", "(", "0", ",", "'display_as_link'", ")", "display_as_link", ".", "allow_tags", "=", "True", "# TODO: presumably not required anymore since Django-1.9", "try", ":", "display_as_link", ".", "short_description", "=", "user_model_admin", ".", "identifier", ".", "short_description", "except", "AttributeError", ":", "display_as_link", ".", "short_description", "=", "admin", ".", "utils", ".", "label_for_field", "(", "list_display_link", ",", "self", ".", "UserModel", ")", "self", ".", "display_as_link", "=", "six", ".", "create_bound_method", "(", "display_as_link", ",", "self", ")", "ChangeList", "=", "self", ".", "get_changelist", "(", "request", ")", "cl", "=", "ChangeList", "(", "request", ",", "self", ".", "UserModel", ",", "list_display", ",", "(", "None", ",", ")", ",", "# disable list_display_links in ChangeList, instead override that field", "user_model_admin", ".", "list_filter", ",", "user_model_admin", ".", "date_hierarchy", ",", "user_model_admin", ".", "search_fields", ",", "user_model_admin", ".", "list_select_related", ",", "user_model_admin", ".", "list_per_page", ",", "user_model_admin", ".", "list_max_show_all", ",", "(", ")", ",", "# disable list_editable", "self", ")", "cl", ".", "formset", "=", "None", "selection_note_all", "=", "ungettext", "(", "'%(total_count)s selected'", ",", "'All %(total_count)s selected'", ",", "cl", ".", "result_count", ")", "context", "=", "{", "'module_name'", ":", "force_text", "(", "opts", ".", "verbose_name_plural", ")", ",", "'selection_note'", ":", "_", "(", "'0 of %(cnt)s selected'", ")", "%", "{", "'cnt'", ":", "len", "(", "cl", ".", "result_list", ")", "}", ",", "'selection_note_all'", ":", "selection_note_all", "%", "{", 
"'total_count'", ":", "cl", ".", "result_count", "}", ",", "'title'", ":", "_", "(", "\"Select %(user_model)s to emulate\"", ")", "%", "{", "'user_model'", ":", "opts", ".", "verbose_name", "}", ",", "'is_popup'", ":", "cl", ".", "is_popup", ",", "'cl'", ":", "cl", ",", "'media'", ":", "self", ".", "media", ",", "'has_add_permission'", ":", "False", ",", "'opts'", ":", "cl", ".", "opts", ",", "'app_label'", ":", "app_label", ",", "'actions_on_top'", ":", "self", ".", "actions_on_top", ",", "'actions_on_bottom'", ":", "self", ".", "actions_on_bottom", ",", "'actions_selection_counter'", ":", "self", ".", "actions_selection_counter", ",", "'preserved_filters'", ":", "self", ".", "get_preserved_filters", "(", "request", ")", ",", "}", "return", "TemplateResponse", "(", "request", ",", "self", ".", "change_list_template", "or", "[", "'admin/%s/%s/change_list.html'", "%", "(", "app_label", ",", "opts", ".", "model_name", ")", ",", "'admin/%s/change_list.html'", "%", "app_label", ",", "'admin/change_list.html'", "]", ",", "context", ")" ]
The list view
[ "The", "list", "view" ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/segmentation/mixins.py#L88-L159
5,536
jrief/djangocms-cascade
cmsplugin_cascade/apps.py
CascadeConfig.pre_migrate
def pre_migrate(cls, sender=None, **kwargs): """ Iterate over contenttypes and remove those not in proxy models """ ContentType = apps.get_model('contenttypes', 'ContentType') try: queryset = ContentType.objects.filter(app_label=sender.label) for ctype in queryset.exclude(model__in=sender.get_proxy_models().keys()): model = ctype.model_class() if model is None: sender.revoke_permissions(ctype) ContentType.objects.get(app_label=sender.label, model=ctype).delete() except DatabaseError: return
python
def pre_migrate(cls, sender=None, **kwargs): """ Iterate over contenttypes and remove those not in proxy models """ ContentType = apps.get_model('contenttypes', 'ContentType') try: queryset = ContentType.objects.filter(app_label=sender.label) for ctype in queryset.exclude(model__in=sender.get_proxy_models().keys()): model = ctype.model_class() if model is None: sender.revoke_permissions(ctype) ContentType.objects.get(app_label=sender.label, model=ctype).delete() except DatabaseError: return
[ "def", "pre_migrate", "(", "cls", ",", "sender", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ContentType", "=", "apps", ".", "get_model", "(", "'contenttypes'", ",", "'ContentType'", ")", "try", ":", "queryset", "=", "ContentType", ".", "objects", ".", "filter", "(", "app_label", "=", "sender", ".", "label", ")", "for", "ctype", "in", "queryset", ".", "exclude", "(", "model__in", "=", "sender", ".", "get_proxy_models", "(", ")", ".", "keys", "(", ")", ")", ":", "model", "=", "ctype", ".", "model_class", "(", ")", "if", "model", "is", "None", ":", "sender", ".", "revoke_permissions", "(", "ctype", ")", "ContentType", ".", "objects", ".", "get", "(", "app_label", "=", "sender", ".", "label", ",", "model", "=", "ctype", ")", ".", "delete", "(", ")", "except", "DatabaseError", ":", "return" ]
Iterate over contenttypes and remove those not in proxy models
[ "Iterate", "over", "contenttypes", "and", "remove", "those", "not", "in", "proxy", "models" ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/apps.py#L29-L42
5,537
jrief/djangocms-cascade
cmsplugin_cascade/apps.py
CascadeConfig.post_migrate
def post_migrate(cls, sender=None, **kwargs): """ Iterate over fake_proxy_models and add contenttypes and permissions for missing proxy models, if this has not been done by Django yet """ ContentType = apps.get_model('contenttypes', 'ContentType') for model_name, proxy_model in sender.get_proxy_models().items(): ctype, created = ContentType.objects.get_or_create(app_label=sender.label, model=model_name) if created: sender.grant_permissions(proxy_model)
python
def post_migrate(cls, sender=None, **kwargs): """ Iterate over fake_proxy_models and add contenttypes and permissions for missing proxy models, if this has not been done by Django yet """ ContentType = apps.get_model('contenttypes', 'ContentType') for model_name, proxy_model in sender.get_proxy_models().items(): ctype, created = ContentType.objects.get_or_create(app_label=sender.label, model=model_name) if created: sender.grant_permissions(proxy_model)
[ "def", "post_migrate", "(", "cls", ",", "sender", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ContentType", "=", "apps", ".", "get_model", "(", "'contenttypes'", ",", "'ContentType'", ")", "for", "model_name", ",", "proxy_model", "in", "sender", ".", "get_proxy_models", "(", ")", ".", "items", "(", ")", ":", "ctype", ",", "created", "=", "ContentType", ".", "objects", ".", "get_or_create", "(", "app_label", "=", "sender", ".", "label", ",", "model", "=", "model_name", ")", "if", "created", ":", "sender", ".", "grant_permissions", "(", "proxy_model", ")" ]
Iterate over fake_proxy_models and add contenttypes and permissions for missing proxy models, if this has not been done by Django yet
[ "Iterate", "over", "fake_proxy_models", "and", "add", "contenttypes", "and", "permissions", "for", "missing", "proxy", "models", "if", "this", "has", "not", "been", "done", "by", "Django", "yet" ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/apps.py#L45-L55
5,538
jrief/djangocms-cascade
cmsplugin_cascade/apps.py
CascadeConfig.grant_permissions
def grant_permissions(self, proxy_model): """ Create the default permissions for the just added proxy model """ ContentType = apps.get_model('contenttypes', 'ContentType') try: Permission = apps.get_model('auth', 'Permission') except LookupError: return # searched_perms will hold the permissions we're looking for as (content_type, (codename, name)) searched_perms = [] ctype = ContentType.objects.get_for_model(proxy_model) for perm in self.default_permissions: searched_perms.append(( '{0}_{1}'.format(perm, proxy_model._meta.model_name), "Can {0} {1}".format(perm, proxy_model._meta.verbose_name_raw) )) all_perms = set(Permission.objects.filter( content_type=ctype, ).values_list( 'content_type', 'codename' )) permissions = [ Permission(codename=codename, name=name, content_type=ctype) for codename, name in searched_perms if (ctype.pk, codename) not in all_perms ] Permission.objects.bulk_create(permissions)
python
def grant_permissions(self, proxy_model): """ Create the default permissions for the just added proxy model """ ContentType = apps.get_model('contenttypes', 'ContentType') try: Permission = apps.get_model('auth', 'Permission') except LookupError: return # searched_perms will hold the permissions we're looking for as (content_type, (codename, name)) searched_perms = [] ctype = ContentType.objects.get_for_model(proxy_model) for perm in self.default_permissions: searched_perms.append(( '{0}_{1}'.format(perm, proxy_model._meta.model_name), "Can {0} {1}".format(perm, proxy_model._meta.verbose_name_raw) )) all_perms = set(Permission.objects.filter( content_type=ctype, ).values_list( 'content_type', 'codename' )) permissions = [ Permission(codename=codename, name=name, content_type=ctype) for codename, name in searched_perms if (ctype.pk, codename) not in all_perms ] Permission.objects.bulk_create(permissions)
[ "def", "grant_permissions", "(", "self", ",", "proxy_model", ")", ":", "ContentType", "=", "apps", ".", "get_model", "(", "'contenttypes'", ",", "'ContentType'", ")", "try", ":", "Permission", "=", "apps", ".", "get_model", "(", "'auth'", ",", "'Permission'", ")", "except", "LookupError", ":", "return", "# searched_perms will hold the permissions we're looking for as (content_type, (codename, name))", "searched_perms", "=", "[", "]", "ctype", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "proxy_model", ")", "for", "perm", "in", "self", ".", "default_permissions", ":", "searched_perms", ".", "append", "(", "(", "'{0}_{1}'", ".", "format", "(", "perm", ",", "proxy_model", ".", "_meta", ".", "model_name", ")", ",", "\"Can {0} {1}\"", ".", "format", "(", "perm", ",", "proxy_model", ".", "_meta", ".", "verbose_name_raw", ")", ")", ")", "all_perms", "=", "set", "(", "Permission", ".", "objects", ".", "filter", "(", "content_type", "=", "ctype", ",", ")", ".", "values_list", "(", "'content_type'", ",", "'codename'", ")", ")", "permissions", "=", "[", "Permission", "(", "codename", "=", "codename", ",", "name", "=", "name", ",", "content_type", "=", "ctype", ")", "for", "codename", ",", "name", "in", "searched_perms", "if", "(", "ctype", ".", "pk", ",", "codename", ")", "not", "in", "all_perms", "]", "Permission", ".", "objects", ".", "bulk_create", "(", "permissions", ")" ]
Create the default permissions for the just added proxy model
[ "Create", "the", "default", "permissions", "for", "the", "just", "added", "proxy", "model" ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/apps.py#L67-L95
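For illustration of what the searched_perms loop in grant_permissions produces (the model name and verbose name below are assumed, and default_permissions is taken to be the usual ('add', 'change', 'delete') set — neither value is shown in the record):

default_permissions = ('add', 'change', 'delete')   # assumed value
searched_perms = [
    ('{0}_{1}'.format(perm, 'bootstraprowplugin'),
     'Can {0} {1}'.format(perm, 'bootstrap row plugin'))
    for perm in default_permissions
]
# searched_perms == [('add_bootstraprowplugin', 'Can add bootstrap row plugin'),
#                    ('change_bootstraprowplugin', 'Can change bootstrap row plugin'),
#                    ('delete_bootstraprowplugin', 'Can delete bootstrap row plugin')]

Only pairs whose (content_type_id, codename) is not already present are then created in a single bulk_create call.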
5,539
jrief/djangocms-cascade
cmsplugin_cascade/apps.py
CascadeConfig.revoke_permissions
def revoke_permissions(self, ctype): """ Remove all permissions for the content type to be removed """ ContentType = apps.get_model('contenttypes', 'ContentType') try: Permission = apps.get_model('auth', 'Permission') except LookupError: return codenames = ['{0}_{1}'.format(perm, ctype) for perm in self.default_permissions] cascade_element = apps.get_model(self.label, 'cascadeelement') element_ctype = ContentType.objects.get_for_model(cascade_element) Permission.objects.filter(content_type=element_ctype, codename__in=codenames).delete()
python
def revoke_permissions(self, ctype): """ Remove all permissions for the content type to be removed """ ContentType = apps.get_model('contenttypes', 'ContentType') try: Permission = apps.get_model('auth', 'Permission') except LookupError: return codenames = ['{0}_{1}'.format(perm, ctype) for perm in self.default_permissions] cascade_element = apps.get_model(self.label, 'cascadeelement') element_ctype = ContentType.objects.get_for_model(cascade_element) Permission.objects.filter(content_type=element_ctype, codename__in=codenames).delete()
[ "def", "revoke_permissions", "(", "self", ",", "ctype", ")", ":", "ContentType", "=", "apps", ".", "get_model", "(", "'contenttypes'", ",", "'ContentType'", ")", "try", ":", "Permission", "=", "apps", ".", "get_model", "(", "'auth'", ",", "'Permission'", ")", "except", "LookupError", ":", "return", "codenames", "=", "[", "'{0}_{1}'", ".", "format", "(", "perm", ",", "ctype", ")", "for", "perm", "in", "self", ".", "default_permissions", "]", "cascade_element", "=", "apps", ".", "get_model", "(", "self", ".", "label", ",", "'cascadeelement'", ")", "element_ctype", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "cascade_element", ")", "Permission", ".", "objects", ".", "filter", "(", "content_type", "=", "element_ctype", ",", "codename__in", "=", "codenames", ")", ".", "delete", "(", ")" ]
Remove all permissions for the content type to be removed
[ "Remove", "all", "permissions", "for", "the", "content", "type", "to", "be", "removed" ]
58996f990c4068e5d50f0db6030a5c0e06b682e5
https://github.com/jrief/djangocms-cascade/blob/58996f990c4068e5d50f0db6030a5c0e06b682e5/cmsplugin_cascade/apps.py#L97-L110
5,540
adamreeve/npTDMS
nptdms/writer.py
to_file
def to_file(file, array): """Wrapper around ndarray.tofile to support any file-like object""" try: array.tofile(file) except (TypeError, IOError, UnsupportedOperation): # tostring actually returns bytes file.write(array.tostring())
python
def to_file(file, array): """Wrapper around ndarray.tofile to support any file-like object""" try: array.tofile(file) except (TypeError, IOError, UnsupportedOperation): # tostring actually returns bytes file.write(array.tostring())
[ "def", "to_file", "(", "file", ",", "array", ")", ":", "try", ":", "array", ".", "tofile", "(", "file", ")", "except", "(", "TypeError", ",", "IOError", ",", "UnsupportedOperation", ")", ":", "# tostring actually returns bytes", "file", ".", "write", "(", "array", ".", "tostring", "(", ")", ")" ]
Wrapper around ndarray.tofile to support any file-like object
[ "Wrapper", "around", "ndarray", ".", "tofile", "to", "support", "any", "file", "-", "like", "object" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/writer.py#L317-L324
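A small check of the fallback path in to_file above: io.BytesIO has no real file descriptor, so ndarray.tofile raises and the wrapper writes the raw bytes instead. The import path matches the record's module; treat the snippet as an illustrative sketch.

import io
import numpy as np
from nptdms.writer import to_file

buf = io.BytesIO()
values = np.arange(4, dtype=np.int32)
to_file(buf, values)                 # falls back to file.write(values.tostring())
assert buf.getvalue() == values.tobytes()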
5,541
adamreeve/npTDMS
nptdms/writer.py
TdmsWriter.write_segment
def write_segment(self, objects): """ Write a segment of data to a TDMS file :param objects: A list of TdmsObject instances to write """ segment = TdmsSegment(objects) segment.write(self._file)
python
def write_segment(self, objects): """ Write a segment of data to a TDMS file :param objects: A list of TdmsObject instances to write """ segment = TdmsSegment(objects) segment.write(self._file)
[ "def", "write_segment", "(", "self", ",", "objects", ")", ":", "segment", "=", "TdmsSegment", "(", "objects", ")", "segment", ".", "write", "(", "self", ".", "_file", ")" ]
Write a segment of data to a TDMS file :param objects: A list of TdmsObject instances to write
[ "Write", "a", "segment", "of", "data", "to", "a", "TDMS", "file" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/writer.py#L63-L69
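A hedged usage sketch for write_segment; ChannelObject is assumed to be one of the TdmsObject implementations exported alongside TdmsWriter in this version, and the output filename is a placeholder.

import numpy as np
from nptdms import TdmsWriter, ChannelObject

with TdmsWriter('example_output.tdms') as writer:
    channel = ChannelObject('group_1', 'channel_1', np.linspace(0.0, 1.0, 100))
    writer.write_segment([channel])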
5,542
adamreeve/npTDMS
nptdms/tdms.py
fromfile
def fromfile(file, dtype, count, *args, **kwargs): """Wrapper around np.fromfile to support any file-like object""" try: return np.fromfile(file, dtype=dtype, count=count, *args, **kwargs) except (TypeError, IOError, UnsupportedOperation): return np.frombuffer( file.read(count * np.dtype(dtype).itemsize), dtype=dtype, count=count, *args, **kwargs)
python
def fromfile(file, dtype, count, *args, **kwargs): """Wrapper around np.fromfile to support any file-like object""" try: return np.fromfile(file, dtype=dtype, count=count, *args, **kwargs) except (TypeError, IOError, UnsupportedOperation): return np.frombuffer( file.read(count * np.dtype(dtype).itemsize), dtype=dtype, count=count, *args, **kwargs)
[ "def", "fromfile", "(", "file", ",", "dtype", ",", "count", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "np", ".", "fromfile", "(", "file", ",", "dtype", "=", "dtype", ",", "count", "=", "count", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "(", "TypeError", ",", "IOError", ",", "UnsupportedOperation", ")", ":", "return", "np", ".", "frombuffer", "(", "file", ".", "read", "(", "count", "*", "np", ".", "dtype", "(", "dtype", ")", ".", "itemsize", ")", ",", "dtype", "=", "dtype", ",", "count", "=", "count", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Wrapper around np.fromfile to support any file-like object
[ "Wrapper", "around", "np", ".", "fromfile", "to", "support", "any", "file", "-", "like", "object" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L43-L51
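A quick illustration of the wrapper's fallback branch: np.fromfile cannot read from io.BytesIO, so the call above ends up on the np.frombuffer path. The import path matches the record's module.

import io
import numpy as np
from nptdms.tdms import fromfile

raw = np.array([1, 2, 3], dtype='<i4').tobytes()
values = fromfile(io.BytesIO(raw), dtype='<i4', count=3)
assert list(values) == [1, 2, 3]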
5,543
adamreeve/npTDMS
nptdms/tdms.py
read_property
def read_property(f, endianness="<"): """ Read a property from a segment's metadata """ prop_name = types.String.read(f, endianness) prop_data_type = types.tds_data_types[types.Uint32.read(f, endianness)] value = prop_data_type.read(f, endianness) log.debug("Property %s: %r", prop_name, value) return prop_name, value
python
def read_property(f, endianness="<"): """ Read a property from a segment's metadata """ prop_name = types.String.read(f, endianness) prop_data_type = types.tds_data_types[types.Uint32.read(f, endianness)] value = prop_data_type.read(f, endianness) log.debug("Property %s: %r", prop_name, value) return prop_name, value
[ "def", "read_property", "(", "f", ",", "endianness", "=", "\"<\"", ")", ":", "prop_name", "=", "types", ".", "String", ".", "read", "(", "f", ",", "endianness", ")", "prop_data_type", "=", "types", ".", "tds_data_types", "[", "types", ".", "Uint32", ".", "read", "(", "f", ",", "endianness", ")", "]", "value", "=", "prop_data_type", ".", "read", "(", "f", ",", "endianness", ")", "log", ".", "debug", "(", "\"Property %s: %r\"", ",", "prop_name", ",", "value", ")", "return", "prop_name", ",", "value" ]
Read a property from a segment's metadata
[ "Read", "a", "property", "from", "a", "segment", "s", "metadata" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L54-L61
5,544
adamreeve/npTDMS
nptdms/tdms.py
read_string_data
def read_string_data(file, number_values, endianness): """ Read string raw data This is stored as an array of offsets followed by the contiguous string data. """ offsets = [0] for i in range(number_values): offsets.append(types.Uint32.read(file, endianness)) strings = [] for i in range(number_values): s = file.read(offsets[i + 1] - offsets[i]) strings.append(s.decode('utf-8')) return strings
python
def read_string_data(file, number_values, endianness): """ Read string raw data This is stored as an array of offsets followed by the contiguous string data. """ offsets = [0] for i in range(number_values): offsets.append(types.Uint32.read(file, endianness)) strings = [] for i in range(number_values): s = file.read(offsets[i + 1] - offsets[i]) strings.append(s.decode('utf-8')) return strings
[ "def", "read_string_data", "(", "file", ",", "number_values", ",", "endianness", ")", ":", "offsets", "=", "[", "0", "]", "for", "i", "in", "range", "(", "number_values", ")", ":", "offsets", ".", "append", "(", "types", ".", "Uint32", ".", "read", "(", "file", ",", "endianness", ")", ")", "strings", "=", "[", "]", "for", "i", "in", "range", "(", "number_values", ")", ":", "s", "=", "file", ".", "read", "(", "offsets", "[", "i", "+", "1", "]", "-", "offsets", "[", "i", "]", ")", "strings", ".", "append", "(", "s", ".", "decode", "(", "'utf-8'", ")", ")", "return", "strings" ]
Read string raw data This is stored as an array of offsets followed by the contiguous string data.
[ "Read", "string", "raw", "data" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L1049-L1062
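To make the on-disk layout that read_string_data parses concrete, here is a standalone sketch that builds and decodes the same structure with the struct module only (no nptdms internals): one little-endian uint32 end offset per value, followed by the UTF-8 string data packed back to back.

import struct

buf = struct.pack('<2I', 3, 5) + b'abcde'   # end offsets 3 and 5, then "abc" + "de"

offsets = [0] + list(struct.unpack_from('<2I', buf, 0))
data_start = struct.calcsize('<2I')
strings = [
    buf[data_start + offsets[i]:data_start + offsets[i + 1]].decode('utf-8')
    for i in range(2)
]
assert strings == ['abc', 'de']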
5,545
adamreeve/npTDMS
nptdms/tdms.py
path_components
def path_components(path): """Convert a path into group and channel name components""" def yield_components(path): # Iterate over each character and the next character chars = zip_longest(path, path[1:]) try: # Iterate over components while True: c, n = next(chars) if c != '/': raise ValueError("Invalid path, expected \"/\"") elif (n is not None and n != "'"): raise ValueError("Invalid path, expected \"'\"") else: # Consume "'" or raise StopIteration if at the end next(chars) component = [] # Iterate over characters in component name while True: c, n = next(chars) if c == "'" and n == "'": component += "'" # Consume second "'" next(chars) elif c == "'": yield "".join(component) break else: component += c except StopIteration: return return list(yield_components(path))
python
def path_components(path): """Convert a path into group and channel name components""" def yield_components(path): # Iterate over each character and the next character chars = zip_longest(path, path[1:]) try: # Iterate over components while True: c, n = next(chars) if c != '/': raise ValueError("Invalid path, expected \"/\"") elif (n is not None and n != "'"): raise ValueError("Invalid path, expected \"'\"") else: # Consume "'" or raise StopIteration if at the end next(chars) component = [] # Iterate over characters in component name while True: c, n = next(chars) if c == "'" and n == "'": component += "'" # Consume second "'" next(chars) elif c == "'": yield "".join(component) break else: component += c except StopIteration: return return list(yield_components(path))
[ "def", "path_components", "(", "path", ")", ":", "def", "yield_components", "(", "path", ")", ":", "# Iterate over each character and the next character", "chars", "=", "zip_longest", "(", "path", ",", "path", "[", "1", ":", "]", ")", "try", ":", "# Iterate over components", "while", "True", ":", "c", ",", "n", "=", "next", "(", "chars", ")", "if", "c", "!=", "'/'", ":", "raise", "ValueError", "(", "\"Invalid path, expected \\\"/\\\"\"", ")", "elif", "(", "n", "is", "not", "None", "and", "n", "!=", "\"'\"", ")", ":", "raise", "ValueError", "(", "\"Invalid path, expected \\\"'\\\"\"", ")", "else", ":", "# Consume \"'\" or raise StopIteration if at the end", "next", "(", "chars", ")", "component", "=", "[", "]", "# Iterate over characters in component name", "while", "True", ":", "c", ",", "n", "=", "next", "(", "chars", ")", "if", "c", "==", "\"'\"", "and", "n", "==", "\"'\"", ":", "component", "+=", "\"'\"", "# Consume second \"'\"", "next", "(", "chars", ")", "elif", "c", "==", "\"'\"", ":", "yield", "\"\"", ".", "join", "(", "component", ")", "break", "else", ":", "component", "+=", "c", "except", "StopIteration", ":", "return", "return", "list", "(", "yield_components", "(", "path", ")", ")" ]
Convert a path into group and channel name components
[ "Convert", "a", "path", "into", "group", "and", "channel", "name", "components" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L1065-L1098
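Usage example for path_components (a module-level helper in the same file); note that a single quote inside a group or channel name is escaped by doubling it in the path:

from nptdms.tdms import path_components

assert path_components("/'Group 1'/'Channel''s name'") == ['Group 1', "Channel's name"]
assert path_components('/') == []    # the root object's path has no components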
5,546
adamreeve/npTDMS
nptdms/tdms.py
TdmsFile.object
def object(self, *path): """Get a TDMS object from the file :param path: The object group and channel. Providing no channel returns a group object, and providing no channel or group will return the root object. :rtype: :class:`TdmsObject` For example, to get the root object:: object() To get a group:: object("group_name") To get a channel:: object("group_name", "channel_name") """ object_path = self._path(*path) try: return self.objects[object_path] except KeyError: raise KeyError("Invalid object path: %s" % object_path)
python
def object(self, *path): """Get a TDMS object from the file :param path: The object group and channel. Providing no channel returns a group object, and providing no channel or group will return the root object. :rtype: :class:`TdmsObject` For example, to get the root object:: object() To get a group:: object("group_name") To get a channel:: object("group_name", "channel_name") """ object_path = self._path(*path) try: return self.objects[object_path] except KeyError: raise KeyError("Invalid object path: %s" % object_path)
[ "def", "object", "(", "self", ",", "*", "path", ")", ":", "object_path", "=", "self", ".", "_path", "(", "*", "path", ")", "try", ":", "return", "self", ".", "objects", "[", "object_path", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "\"Invalid object path: %s\"", "%", "object_path", ")" ]
Get a TDMS object from the file :param path: The object group and channel. Providing no channel returns a group object, and providing no channel or group will return the root object. :rtype: :class:`TdmsObject` For example, to get the root object:: object() To get a group:: object("group_name") To get a channel:: object("group_name", "channel_name")
[ "Get", "a", "TDMS", "object", "from", "the", "file" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L132-L157
5,547
adamreeve/npTDMS
nptdms/tdms.py
TdmsFile.groups
def groups(self): """Return the names of groups in the file Note that there is not necessarily a TDMS object associated with each group name. :rtype: List of strings. """ # Split paths into components and take the first (group) component. object_paths = ( path_components(path) for path in self.objects) group_names = (path[0] for path in object_paths if len(path) > 0) # Use an ordered dict as an ordered set to find unique # groups in order. groups_set = OrderedDict() for group in group_names: groups_set[group] = None return list(groups_set)
python
def groups(self): """Return the names of groups in the file Note that there is not necessarily a TDMS object associated with each group name. :rtype: List of strings. """ # Split paths into components and take the first (group) component. object_paths = ( path_components(path) for path in self.objects) group_names = (path[0] for path in object_paths if len(path) > 0) # Use an ordered dict as an ordered set to find unique # groups in order. groups_set = OrderedDict() for group in group_names: groups_set[group] = None return list(groups_set)
[ "def", "groups", "(", "self", ")", ":", "# Split paths into components and take the first (group) component.", "object_paths", "=", "(", "path_components", "(", "path", ")", "for", "path", "in", "self", ".", "objects", ")", "group_names", "=", "(", "path", "[", "0", "]", "for", "path", "in", "object_paths", "if", "len", "(", "path", ")", ">", "0", ")", "# Use an ordered dict as an ordered set to find unique", "# groups in order.", "groups_set", "=", "OrderedDict", "(", ")", "for", "group", "in", "group_names", ":", "groups_set", "[", "group", "]", "=", "None", "return", "list", "(", "groups_set", ")" ]
Return the names of groups in the file Note that there is not necessarily a TDMS object associated with each group name. :rtype: List of strings.
[ "Return", "the", "names", "of", "groups", "in", "the", "file" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L159-L180
5,548
adamreeve/npTDMS
nptdms/tdms.py
TdmsFile.group_channels
def group_channels(self, group): """Returns a list of channel objects for the given group :param group: Name of the group to get channels for. :rtype: List of :class:`TdmsObject` objects. """ path = self._path(group) return [ self.objects[p] for p in self.objects if p.startswith(path + '/')]
python
def group_channels(self, group): """Returns a list of channel objects for the given group :param group: Name of the group to get channels for. :rtype: List of :class:`TdmsObject` objects. """ path = self._path(group) return [ self.objects[p] for p in self.objects if p.startswith(path + '/')]
[ "def", "group_channels", "(", "self", ",", "group", ")", ":", "path", "=", "self", ".", "_path", "(", "group", ")", "return", "[", "self", ".", "objects", "[", "p", "]", "for", "p", "in", "self", ".", "objects", "if", "p", ".", "startswith", "(", "path", "+", "'/'", ")", "]" ]
Returns a list of channel objects for the given group :param group: Name of the group to get channels for. :rtype: List of :class:`TdmsObject` objects.
[ "Returns", "a", "list", "of", "channel", "objects", "for", "the", "given", "group" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L182-L194
5,549
adamreeve/npTDMS
nptdms/tdms.py
TdmsFile.as_dataframe
def as_dataframe(self, time_index=False, absolute_time=False): """ Converts the TDMS file to a DataFrame :param time_index: Whether to include a time index for the dataframe. :param absolute_time: If time_index is true, whether the time index values are absolute times or relative to the start time. :return: The full TDMS file data. :rtype: pandas.DataFrame """ import pandas as pd dataframe_dict = OrderedDict() for key, value in self.objects.items(): if value.has_data: index = value.time_track(absolute_time) if time_index else None dataframe_dict[key] = pd.Series(data=value.data, index=index) return pd.DataFrame.from_dict(dataframe_dict)
python
def as_dataframe(self, time_index=False, absolute_time=False): """ Converts the TDMS file to a DataFrame :param time_index: Whether to include a time index for the dataframe. :param absolute_time: If time_index is true, whether the time index values are absolute times or relative to the start time. :return: The full TDMS file data. :rtype: pandas.DataFrame """ import pandas as pd dataframe_dict = OrderedDict() for key, value in self.objects.items(): if value.has_data: index = value.time_track(absolute_time) if time_index else None dataframe_dict[key] = pd.Series(data=value.data, index=index) return pd.DataFrame.from_dict(dataframe_dict)
[ "def", "as_dataframe", "(", "self", ",", "time_index", "=", "False", ",", "absolute_time", "=", "False", ")", ":", "import", "pandas", "as", "pd", "dataframe_dict", "=", "OrderedDict", "(", ")", "for", "key", ",", "value", "in", "self", ".", "objects", ".", "items", "(", ")", ":", "if", "value", ".", "has_data", ":", "index", "=", "value", ".", "time_track", "(", "absolute_time", ")", "if", "time_index", "else", "None", "dataframe_dict", "[", "key", "]", "=", "pd", ".", "Series", "(", "data", "=", "value", ".", "data", ",", "index", "=", "index", ")", "return", "pd", ".", "DataFrame", ".", "from_dict", "(", "dataframe_dict", ")" ]
Converts the TDMS file to a DataFrame :param time_index: Whether to include a time index for the dataframe. :param absolute_time: If time_index is true, whether the time index values are absolute times or relative to the start time. :return: The full TDMS file data. :rtype: pandas.DataFrame
[ "Converts", "the", "TDMS", "file", "to", "a", "DataFrame" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L208-L226
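A hedged usage sketch for as_dataframe; the filename is a placeholder, and the call assumes pandas is installed since the method imports it lazily:

from nptdms import TdmsFile

tdms_file = TdmsFile('example.tdms')
df = tdms_file.as_dataframe(time_index=True, absolute_time=False)
print(df.head())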
5,550
adamreeve/npTDMS
nptdms/tdms.py
TdmsFile.as_hdf
def as_hdf(self, filepath, mode='w', group='/'): """ Converts the TDMS file into an HDF5 file :param filepath: The path of the HDF5 file you want to write to. :param mode: The write mode of the HDF5 file. This can be w, a ... :param group: A group in the HDF5 file that will contain the TDMS data. """ import h5py # Groups in TDMS are mapped to the first level of the HDF5 hierarchy # Channels in TDMS are then mapped to the second level of the HDF5 # hierarchy, under the appropriate groups. # Properties in TDMS are mapped to attributes in HDF5. # These all exist under the appropriate, channel group etc. h5file = h5py.File(filepath, mode) container_group = None if group in h5file: container_group = h5file[group] else: container_group = h5file.create_group(group) # First write the properties at the root level try: root = self.object() for property_name, property_value in root.properties.items(): container_group.attrs[property_name] = property_value except KeyError: # No root object present pass # Now iterate through groups and channels, # writing the properties and data for group_name in self.groups(): try: group = self.object(group_name) # Write the group's properties for prop_name, prop_value in group.properties.items(): container_group[group_name].attrs[prop_name] = prop_value except KeyError: # No group object present pass # Write properties and data for each channel for channel in self.group_channels(group_name): for prop_name, prop_value in channel.properties.items(): container_group.attrs[prop_name] = prop_value container_group[group_name+'/'+channel.channel] = channel.data return h5file
python
def as_hdf(self, filepath, mode='w', group='/'): """ Converts the TDMS file into an HDF5 file :param filepath: The path of the HDF5 file you want to write to. :param mode: The write mode of the HDF5 file. This can be w, a ... :param group: A group in the HDF5 file that will contain the TDMS data. """ import h5py # Groups in TDMS are mapped to the first level of the HDF5 hierarchy # Channels in TDMS are then mapped to the second level of the HDF5 # hierarchy, under the appropriate groups. # Properties in TDMS are mapped to attributes in HDF5. # These all exist under the appropriate, channel group etc. h5file = h5py.File(filepath, mode) container_group = None if group in h5file: container_group = h5file[group] else: container_group = h5file.create_group(group) # First write the properties at the root level try: root = self.object() for property_name, property_value in root.properties.items(): container_group.attrs[property_name] = property_value except KeyError: # No root object present pass # Now iterate through groups and channels, # writing the properties and data for group_name in self.groups(): try: group = self.object(group_name) # Write the group's properties for prop_name, prop_value in group.properties.items(): container_group[group_name].attrs[prop_name] = prop_value except KeyError: # No group object present pass # Write properties and data for each channel for channel in self.group_channels(group_name): for prop_name, prop_value in channel.properties.items(): container_group.attrs[prop_name] = prop_value container_group[group_name+'/'+channel.channel] = channel.data return h5file
[ "def", "as_hdf", "(", "self", ",", "filepath", ",", "mode", "=", "'w'", ",", "group", "=", "'/'", ")", ":", "import", "h5py", "# Groups in TDMS are mapped to the first level of the HDF5 hierarchy", "# Channels in TDMS are then mapped to the second level of the HDF5", "# hierarchy, under the appropriate groups.", "# Properties in TDMS are mapped to attributes in HDF5.", "# These all exist under the appropriate, channel group etc.", "h5file", "=", "h5py", ".", "File", "(", "filepath", ",", "mode", ")", "container_group", "=", "None", "if", "group", "in", "h5file", ":", "container_group", "=", "h5file", "[", "group", "]", "else", ":", "container_group", "=", "h5file", ".", "create_group", "(", "group", ")", "# First write the properties at the root level", "try", ":", "root", "=", "self", ".", "object", "(", ")", "for", "property_name", ",", "property_value", "in", "root", ".", "properties", ".", "items", "(", ")", ":", "container_group", ".", "attrs", "[", "property_name", "]", "=", "property_value", "except", "KeyError", ":", "# No root object present", "pass", "# Now iterate through groups and channels,", "# writing the properties and data", "for", "group_name", "in", "self", ".", "groups", "(", ")", ":", "try", ":", "group", "=", "self", ".", "object", "(", "group_name", ")", "# Write the group's properties", "for", "prop_name", ",", "prop_value", "in", "group", ".", "properties", ".", "items", "(", ")", ":", "container_group", "[", "group_name", "]", ".", "attrs", "[", "prop_name", "]", "=", "prop_value", "except", "KeyError", ":", "# No group object present", "pass", "# Write properties and data for each channel", "for", "channel", "in", "self", ".", "group_channels", "(", "group_name", ")", ":", "for", "prop_name", ",", "prop_value", "in", "channel", ".", "properties", ".", "items", "(", ")", ":", "container_group", ".", "attrs", "[", "prop_name", "]", "=", "prop_value", "container_group", "[", "group_name", "+", "'/'", "+", "channel", ".", "channel", "]", "=", "channel", ".", "data", "return", "h5file" ]
Converts the TDMS file into an HDF5 file :param filepath: The path of the HDF5 file you want to write to. :param mode: The write mode of the HDF5 file. This can be w, a ... :param group: A group in the HDF5 file that will contain the TDMS data.
[ "Converts", "the", "TDMS", "file", "into", "an", "HDF5", "file" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L228-L284
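A hedged usage sketch for as_hdf; the paths are placeholders. Note that the method returns the open h5py.File, so the caller is responsible for closing it:

from nptdms import TdmsFile

tdms_file = TdmsFile('example.tdms')
h5file = tdms_file.as_hdf('example.h5', mode='w', group='/converted')
h5file.close()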
5,551
adamreeve/npTDMS
nptdms/tdms.py
_TdmsSegment.read_metadata
def read_metadata(self, f, objects, previous_segment=None): """Read segment metadata section and update object information""" if not self.toc["kTocMetaData"]: try: self.ordered_objects = previous_segment.ordered_objects except AttributeError: raise ValueError( "kTocMetaData is not set for segment but " "there is no previous segment") self.calculate_chunks() return if not self.toc["kTocNewObjList"]: # In this case, there can be a list of new objects that # are appended, or previous objects can also be repeated # if their properties change self.ordered_objects = [ copy(o) for o in previous_segment.ordered_objects] log.debug("Reading metadata at %d", f.tell()) # First four bytes have number of objects in metadata num_objects = types.Int32.read(f, self.endianness) for obj in range(num_objects): # Read the object path object_path = types.String.read(f, self.endianness) # If this is a new segment for an existing object, # reuse the existing object, otherwise, # create a new object and add it to the object dictionary if object_path in objects: obj = objects[object_path] else: obj = TdmsObject(object_path, self.tdms_file) objects[object_path] = obj # Add this segment object to the list of segment objects, # re-using any properties from previous segments. updating_existing = False if not self.toc["kTocNewObjList"]: # Search for the same object from the previous segment # object list. obj_index = [ i for i, o in enumerate(self.ordered_objects) if o.tdms_object is obj] if len(obj_index) > 0: updating_existing = True log.debug("Updating object in segment list") obj_index = obj_index[0] segment_obj = self.ordered_objects[obj_index] if not updating_existing: if obj._previous_segment_object is not None: log.debug("Copying previous segment object") segment_obj = copy(obj._previous_segment_object) else: log.debug("Creating a new segment object") segment_obj = _TdmsSegmentObject(obj, self.endianness) self.ordered_objects.append(segment_obj) # Read the metadata for this object, updating any # data structure information and properties. segment_obj._read_metadata(f) obj._previous_segment_object = segment_obj self.calculate_chunks()
python
def read_metadata(self, f, objects, previous_segment=None): """Read segment metadata section and update object information""" if not self.toc["kTocMetaData"]: try: self.ordered_objects = previous_segment.ordered_objects except AttributeError: raise ValueError( "kTocMetaData is not set for segment but " "there is no previous segment") self.calculate_chunks() return if not self.toc["kTocNewObjList"]: # In this case, there can be a list of new objects that # are appended, or previous objects can also be repeated # if their properties change self.ordered_objects = [ copy(o) for o in previous_segment.ordered_objects] log.debug("Reading metadata at %d", f.tell()) # First four bytes have number of objects in metadata num_objects = types.Int32.read(f, self.endianness) for obj in range(num_objects): # Read the object path object_path = types.String.read(f, self.endianness) # If this is a new segment for an existing object, # reuse the existing object, otherwise, # create a new object and add it to the object dictionary if object_path in objects: obj = objects[object_path] else: obj = TdmsObject(object_path, self.tdms_file) objects[object_path] = obj # Add this segment object to the list of segment objects, # re-using any properties from previous segments. updating_existing = False if not self.toc["kTocNewObjList"]: # Search for the same object from the previous segment # object list. obj_index = [ i for i, o in enumerate(self.ordered_objects) if o.tdms_object is obj] if len(obj_index) > 0: updating_existing = True log.debug("Updating object in segment list") obj_index = obj_index[0] segment_obj = self.ordered_objects[obj_index] if not updating_existing: if obj._previous_segment_object is not None: log.debug("Copying previous segment object") segment_obj = copy(obj._previous_segment_object) else: log.debug("Creating a new segment object") segment_obj = _TdmsSegmentObject(obj, self.endianness) self.ordered_objects.append(segment_obj) # Read the metadata for this object, updating any # data structure information and properties. segment_obj._read_metadata(f) obj._previous_segment_object = segment_obj self.calculate_chunks()
[ "def", "read_metadata", "(", "self", ",", "f", ",", "objects", ",", "previous_segment", "=", "None", ")", ":", "if", "not", "self", ".", "toc", "[", "\"kTocMetaData\"", "]", ":", "try", ":", "self", ".", "ordered_objects", "=", "previous_segment", ".", "ordered_objects", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"kTocMetaData is not set for segment but \"", "\"there is no previous segment\"", ")", "self", ".", "calculate_chunks", "(", ")", "return", "if", "not", "self", ".", "toc", "[", "\"kTocNewObjList\"", "]", ":", "# In this case, there can be a list of new objects that", "# are appended, or previous objects can also be repeated", "# if their properties change", "self", ".", "ordered_objects", "=", "[", "copy", "(", "o", ")", "for", "o", "in", "previous_segment", ".", "ordered_objects", "]", "log", ".", "debug", "(", "\"Reading metadata at %d\"", ",", "f", ".", "tell", "(", ")", ")", "# First four bytes have number of objects in metadata", "num_objects", "=", "types", ".", "Int32", ".", "read", "(", "f", ",", "self", ".", "endianness", ")", "for", "obj", "in", "range", "(", "num_objects", ")", ":", "# Read the object path", "object_path", "=", "types", ".", "String", ".", "read", "(", "f", ",", "self", ".", "endianness", ")", "# If this is a new segment for an existing object,", "# reuse the existing object, otherwise,", "# create a new object and add it to the object dictionary", "if", "object_path", "in", "objects", ":", "obj", "=", "objects", "[", "object_path", "]", "else", ":", "obj", "=", "TdmsObject", "(", "object_path", ",", "self", ".", "tdms_file", ")", "objects", "[", "object_path", "]", "=", "obj", "# Add this segment object to the list of segment objects,", "# re-using any properties from previous segments.", "updating_existing", "=", "False", "if", "not", "self", ".", "toc", "[", "\"kTocNewObjList\"", "]", ":", "# Search for the same object from the previous segment", "# object list.", "obj_index", "=", "[", "i", "for", "i", ",", "o", "in", "enumerate", "(", "self", ".", "ordered_objects", ")", "if", "o", ".", "tdms_object", "is", "obj", "]", "if", "len", "(", "obj_index", ")", ">", "0", ":", "updating_existing", "=", "True", "log", ".", "debug", "(", "\"Updating object in segment list\"", ")", "obj_index", "=", "obj_index", "[", "0", "]", "segment_obj", "=", "self", ".", "ordered_objects", "[", "obj_index", "]", "if", "not", "updating_existing", ":", "if", "obj", ".", "_previous_segment_object", "is", "not", "None", ":", "log", ".", "debug", "(", "\"Copying previous segment object\"", ")", "segment_obj", "=", "copy", "(", "obj", ".", "_previous_segment_object", ")", "else", ":", "log", ".", "debug", "(", "\"Creating a new segment object\"", ")", "segment_obj", "=", "_TdmsSegmentObject", "(", "obj", ",", "self", ".", "endianness", ")", "self", ".", "ordered_objects", ".", "append", "(", "segment_obj", ")", "# Read the metadata for this object, updating any", "# data structure information and properties.", "segment_obj", ".", "_read_metadata", "(", "f", ")", "obj", ".", "_previous_segment_object", "=", "segment_obj", "self", ".", "calculate_chunks", "(", ")" ]
Read segment metadata section and update object information
[ "Read", "segment", "metadata", "section", "and", "update", "object", "information" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L359-L423
5,552
adamreeve/npTDMS
nptdms/tdms.py
_TdmsSegment.calculate_chunks
def calculate_chunks(self): """ Work out the number of chunks the data is in, for cases where the meta data doesn't change at all so there is no lead in. Also increments the number of values for objects in this segment, based on the number of chunks. """ if self.toc['kTocDAQmxRawData']: # chunks defined differently for DAQmxRawData format try: data_size = next( o.number_values * o.raw_data_width for o in self.ordered_objects if o.has_data and o.number_values * o.raw_data_width > 0) except StopIteration: data_size = 0 else: data_size = sum([ o.data_size for o in self.ordered_objects if o.has_data]) total_data_size = self.next_segment_offset - self.raw_data_offset if data_size < 0 or total_data_size < 0: raise ValueError("Negative data size") elif data_size == 0: # Sometimes kTocRawData is set, but there isn't actually any data if total_data_size != data_size: raise ValueError( "Zero channel data size but data length based on " "segment offset is %d." % total_data_size) self.num_chunks = 0 return chunk_remainder = total_data_size % data_size if chunk_remainder == 0: self.num_chunks = int(total_data_size // data_size) # Update data count for the overall tdms object # using the data count for this segment. for obj in self.ordered_objects: if obj.has_data: obj.tdms_object.number_values += ( obj.number_values * self.num_chunks) else: log.warning( "Data size %d is not a multiple of the " "chunk size %d. Will attempt to read last chunk" % (total_data_size, data_size)) self.num_chunks = 1 + int(total_data_size // data_size) self.final_chunk_proportion = ( float(chunk_remainder) / float(data_size)) for obj in self.ordered_objects: if obj.has_data: obj.tdms_object.number_values += ( obj.number_values * (self.num_chunks - 1) + int( obj.number_values * self.final_chunk_proportion))
python
def calculate_chunks(self): """ Work out the number of chunks the data is in, for cases where the meta data doesn't change at all so there is no lead in. Also increments the number of values for objects in this segment, based on the number of chunks. """ if self.toc['kTocDAQmxRawData']: # chunks defined differently for DAQmxRawData format try: data_size = next( o.number_values * o.raw_data_width for o in self.ordered_objects if o.has_data and o.number_values * o.raw_data_width > 0) except StopIteration: data_size = 0 else: data_size = sum([ o.data_size for o in self.ordered_objects if o.has_data]) total_data_size = self.next_segment_offset - self.raw_data_offset if data_size < 0 or total_data_size < 0: raise ValueError("Negative data size") elif data_size == 0: # Sometimes kTocRawData is set, but there isn't actually any data if total_data_size != data_size: raise ValueError( "Zero channel data size but data length based on " "segment offset is %d." % total_data_size) self.num_chunks = 0 return chunk_remainder = total_data_size % data_size if chunk_remainder == 0: self.num_chunks = int(total_data_size // data_size) # Update data count for the overall tdms object # using the data count for this segment. for obj in self.ordered_objects: if obj.has_data: obj.tdms_object.number_values += ( obj.number_values * self.num_chunks) else: log.warning( "Data size %d is not a multiple of the " "chunk size %d. Will attempt to read last chunk" % (total_data_size, data_size)) self.num_chunks = 1 + int(total_data_size // data_size) self.final_chunk_proportion = ( float(chunk_remainder) / float(data_size)) for obj in self.ordered_objects: if obj.has_data: obj.tdms_object.number_values += ( obj.number_values * (self.num_chunks - 1) + int( obj.number_values * self.final_chunk_proportion))
[ "def", "calculate_chunks", "(", "self", ")", ":", "if", "self", ".", "toc", "[", "'kTocDAQmxRawData'", "]", ":", "# chunks defined differently for DAQmxRawData format", "try", ":", "data_size", "=", "next", "(", "o", ".", "number_values", "*", "o", ".", "raw_data_width", "for", "o", "in", "self", ".", "ordered_objects", "if", "o", ".", "has_data", "and", "o", ".", "number_values", "*", "o", ".", "raw_data_width", ">", "0", ")", "except", "StopIteration", ":", "data_size", "=", "0", "else", ":", "data_size", "=", "sum", "(", "[", "o", ".", "data_size", "for", "o", "in", "self", ".", "ordered_objects", "if", "o", ".", "has_data", "]", ")", "total_data_size", "=", "self", ".", "next_segment_offset", "-", "self", ".", "raw_data_offset", "if", "data_size", "<", "0", "or", "total_data_size", "<", "0", ":", "raise", "ValueError", "(", "\"Negative data size\"", ")", "elif", "data_size", "==", "0", ":", "# Sometimes kTocRawData is set, but there isn't actually any data", "if", "total_data_size", "!=", "data_size", ":", "raise", "ValueError", "(", "\"Zero channel data size but data length based on \"", "\"segment offset is %d.\"", "%", "total_data_size", ")", "self", ".", "num_chunks", "=", "0", "return", "chunk_remainder", "=", "total_data_size", "%", "data_size", "if", "chunk_remainder", "==", "0", ":", "self", ".", "num_chunks", "=", "int", "(", "total_data_size", "//", "data_size", ")", "# Update data count for the overall tdms object", "# using the data count for this segment.", "for", "obj", "in", "self", ".", "ordered_objects", ":", "if", "obj", ".", "has_data", ":", "obj", ".", "tdms_object", ".", "number_values", "+=", "(", "obj", ".", "number_values", "*", "self", ".", "num_chunks", ")", "else", ":", "log", ".", "warning", "(", "\"Data size %d is not a multiple of the \"", "\"chunk size %d. Will attempt to read last chunk\"", "%", "(", "total_data_size", ",", "data_size", ")", ")", "self", ".", "num_chunks", "=", "1", "+", "int", "(", "total_data_size", "//", "data_size", ")", "self", ".", "final_chunk_proportion", "=", "(", "float", "(", "chunk_remainder", ")", "/", "float", "(", "data_size", ")", ")", "for", "obj", "in", "self", ".", "ordered_objects", ":", "if", "obj", ".", "has_data", ":", "obj", ".", "tdms_object", ".", "number_values", "+=", "(", "obj", ".", "number_values", "*", "(", "self", ".", "num_chunks", "-", "1", ")", "+", "int", "(", "obj", ".", "number_values", "*", "self", ".", "final_chunk_proportion", ")", ")" ]
Work out the number of chunks the data is in, for cases where the meta data doesn't change at all so there is no lead in. Also increments the number of values for objects in this segment, based on the number of chunks.
[ "Work", "out", "the", "number", "of", "chunks", "the", "data", "is", "in", "for", "cases", "where", "the", "meta", "data", "doesn", "t", "change", "at", "all", "so", "there", "is", "no", "lead", "in", "." ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L425-L485
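A worked example of the chunk arithmetic in the record above, with assumed sizes: each object's value count is added in full for the complete chunks and scaled by final_chunk_proportion for the truncated last chunk.

data_size = 1000                     # bytes of raw data described by the metadata
total_data_size = 2500               # next_segment_offset - raw_data_offset
chunk_remainder = total_data_size % data_size                         # 500
num_chunks = 1 + int(total_data_size // data_size)                    # 3 chunks in total
final_chunk_proportion = float(chunk_remainder) / float(data_size)    # 0.5
assert (num_chunks, final_chunk_proportion) == (3, 0.5)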
5,553
adamreeve/npTDMS
nptdms/tdms.py
_TdmsSegment.read_raw_data
def read_raw_data(self, f): """Read signal data from file""" if not self.toc["kTocRawData"]: return f.seek(self.data_position) total_data_size = self.next_segment_offset - self.raw_data_offset log.debug( "Reading %d bytes of data at %d in %d chunks" % (total_data_size, f.tell(), self.num_chunks)) for chunk in range(self.num_chunks): if self.toc["kTocInterleavedData"]: log.debug("Data is interleaved") data_objects = [o for o in self.ordered_objects if o.has_data] # If all data types have numpy types and all the lengths are # the same, then we can read all data at once with numpy, # which is much faster all_numpy = all( (o.data_type.nptype is not None for o in data_objects)) same_length = (len( set((o.number_values for o in data_objects))) == 1) if (all_numpy and same_length): self._read_interleaved_numpy(f, data_objects) else: self._read_interleaved(f, data_objects) else: object_data = {} log.debug("Data is contiguous") for obj in self.ordered_objects: if obj.has_data: if (chunk == (self.num_chunks - 1) and self.final_chunk_proportion != 1.0): number_values = int( obj.number_values * self.final_chunk_proportion) else: number_values = obj.number_values object_data[obj.path] = ( obj._read_values(f, number_values)) for obj in self.ordered_objects: if obj.has_data: obj.tdms_object._update_data(object_data[obj.path])
python
def read_raw_data(self, f): """Read signal data from file""" if not self.toc["kTocRawData"]: return f.seek(self.data_position) total_data_size = self.next_segment_offset - self.raw_data_offset log.debug( "Reading %d bytes of data at %d in %d chunks" % (total_data_size, f.tell(), self.num_chunks)) for chunk in range(self.num_chunks): if self.toc["kTocInterleavedData"]: log.debug("Data is interleaved") data_objects = [o for o in self.ordered_objects if o.has_data] # If all data types have numpy types and all the lengths are # the same, then we can read all data at once with numpy, # which is much faster all_numpy = all( (o.data_type.nptype is not None for o in data_objects)) same_length = (len( set((o.number_values for o in data_objects))) == 1) if (all_numpy and same_length): self._read_interleaved_numpy(f, data_objects) else: self._read_interleaved(f, data_objects) else: object_data = {} log.debug("Data is contiguous") for obj in self.ordered_objects: if obj.has_data: if (chunk == (self.num_chunks - 1) and self.final_chunk_proportion != 1.0): number_values = int( obj.number_values * self.final_chunk_proportion) else: number_values = obj.number_values object_data[obj.path] = ( obj._read_values(f, number_values)) for obj in self.ordered_objects: if obj.has_data: obj.tdms_object._update_data(object_data[obj.path])
[ "def", "read_raw_data", "(", "self", ",", "f", ")", ":", "if", "not", "self", ".", "toc", "[", "\"kTocRawData\"", "]", ":", "return", "f", ".", "seek", "(", "self", ".", "data_position", ")", "total_data_size", "=", "self", ".", "next_segment_offset", "-", "self", ".", "raw_data_offset", "log", ".", "debug", "(", "\"Reading %d bytes of data at %d in %d chunks\"", "%", "(", "total_data_size", ",", "f", ".", "tell", "(", ")", ",", "self", ".", "num_chunks", ")", ")", "for", "chunk", "in", "range", "(", "self", ".", "num_chunks", ")", ":", "if", "self", ".", "toc", "[", "\"kTocInterleavedData\"", "]", ":", "log", ".", "debug", "(", "\"Data is interleaved\"", ")", "data_objects", "=", "[", "o", "for", "o", "in", "self", ".", "ordered_objects", "if", "o", ".", "has_data", "]", "# If all data types have numpy types and all the lengths are", "# the same, then we can read all data at once with numpy,", "# which is much faster", "all_numpy", "=", "all", "(", "(", "o", ".", "data_type", ".", "nptype", "is", "not", "None", "for", "o", "in", "data_objects", ")", ")", "same_length", "=", "(", "len", "(", "set", "(", "(", "o", ".", "number_values", "for", "o", "in", "data_objects", ")", ")", ")", "==", "1", ")", "if", "(", "all_numpy", "and", "same_length", ")", ":", "self", ".", "_read_interleaved_numpy", "(", "f", ",", "data_objects", ")", "else", ":", "self", ".", "_read_interleaved", "(", "f", ",", "data_objects", ")", "else", ":", "object_data", "=", "{", "}", "log", ".", "debug", "(", "\"Data is contiguous\"", ")", "for", "obj", "in", "self", ".", "ordered_objects", ":", "if", "obj", ".", "has_data", ":", "if", "(", "chunk", "==", "(", "self", ".", "num_chunks", "-", "1", ")", "and", "self", ".", "final_chunk_proportion", "!=", "1.0", ")", ":", "number_values", "=", "int", "(", "obj", ".", "number_values", "*", "self", ".", "final_chunk_proportion", ")", "else", ":", "number_values", "=", "obj", ".", "number_values", "object_data", "[", "obj", ".", "path", "]", "=", "(", "obj", ".", "_read_values", "(", "f", ",", "number_values", ")", ")", "for", "obj", "in", "self", ".", "ordered_objects", ":", "if", "obj", ".", "has_data", ":", "obj", ".", "tdms_object", ".", "_update_data", "(", "object_data", "[", "obj", ".", "path", "]", ")" ]
Read signal data from file
[ "Read", "signal", "data", "from", "file" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L487-L532
5,554
adamreeve/npTDMS
nptdms/tdms.py
_TdmsSegment._read_interleaved_numpy
def _read_interleaved_numpy(self, f, data_objects): """Read interleaved data where all channels have a numpy type""" log.debug("Reading interleaved data all at once") # Read all data into 1 byte unsigned ints first all_channel_bytes = data_objects[0].raw_data_width if all_channel_bytes == 0: all_channel_bytes = sum((o.data_type.size for o in data_objects)) log.debug("all_channel_bytes: %d", all_channel_bytes) number_bytes = int(all_channel_bytes * data_objects[0].number_values) combined_data = fromfile(f, dtype=np.uint8, count=number_bytes) # Reshape, so that one row is all bytes for all objects combined_data = combined_data.reshape(-1, all_channel_bytes) # Now set arrays for each channel data_pos = 0 for (i, obj) in enumerate(data_objects): byte_columns = tuple( range(data_pos, obj.data_type.size + data_pos)) log.debug("Byte columns for channel %d: %s", i, byte_columns) # Select columns for this channel, so that number of values will # be number of bytes per point * number of data points. # Then use ravel to flatten the results into a vector. object_data = combined_data[:, byte_columns].ravel() # Now set correct data type, so that the array length should # be correct object_data.dtype = ( np.dtype(obj.data_type.nptype).newbyteorder(self.endianness)) obj.tdms_object._update_data(object_data) data_pos += obj.data_type.size
python
def _read_interleaved_numpy(self, f, data_objects): """Read interleaved data where all channels have a numpy type""" log.debug("Reading interleaved data all at once") # Read all data into 1 byte unsigned ints first all_channel_bytes = data_objects[0].raw_data_width if all_channel_bytes == 0: all_channel_bytes = sum((o.data_type.size for o in data_objects)) log.debug("all_channel_bytes: %d", all_channel_bytes) number_bytes = int(all_channel_bytes * data_objects[0].number_values) combined_data = fromfile(f, dtype=np.uint8, count=number_bytes) # Reshape, so that one row is all bytes for all objects combined_data = combined_data.reshape(-1, all_channel_bytes) # Now set arrays for each channel data_pos = 0 for (i, obj) in enumerate(data_objects): byte_columns = tuple( range(data_pos, obj.data_type.size + data_pos)) log.debug("Byte columns for channel %d: %s", i, byte_columns) # Select columns for this channel, so that number of values will # be number of bytes per point * number of data points. # Then use ravel to flatten the results into a vector. object_data = combined_data[:, byte_columns].ravel() # Now set correct data type, so that the array length should # be correct object_data.dtype = ( np.dtype(obj.data_type.nptype).newbyteorder(self.endianness)) obj.tdms_object._update_data(object_data) data_pos += obj.data_type.size
[ "def", "_read_interleaved_numpy", "(", "self", ",", "f", ",", "data_objects", ")", ":", "log", ".", "debug", "(", "\"Reading interleaved data all at once\"", ")", "# Read all data into 1 byte unsigned ints first", "all_channel_bytes", "=", "data_objects", "[", "0", "]", ".", "raw_data_width", "if", "all_channel_bytes", "==", "0", ":", "all_channel_bytes", "=", "sum", "(", "(", "o", ".", "data_type", ".", "size", "for", "o", "in", "data_objects", ")", ")", "log", ".", "debug", "(", "\"all_channel_bytes: %d\"", ",", "all_channel_bytes", ")", "number_bytes", "=", "int", "(", "all_channel_bytes", "*", "data_objects", "[", "0", "]", ".", "number_values", ")", "combined_data", "=", "fromfile", "(", "f", ",", "dtype", "=", "np", ".", "uint8", ",", "count", "=", "number_bytes", ")", "# Reshape, so that one row is all bytes for all objects", "combined_data", "=", "combined_data", ".", "reshape", "(", "-", "1", ",", "all_channel_bytes", ")", "# Now set arrays for each channel", "data_pos", "=", "0", "for", "(", "i", ",", "obj", ")", "in", "enumerate", "(", "data_objects", ")", ":", "byte_columns", "=", "tuple", "(", "range", "(", "data_pos", ",", "obj", ".", "data_type", ".", "size", "+", "data_pos", ")", ")", "log", ".", "debug", "(", "\"Byte columns for channel %d: %s\"", ",", "i", ",", "byte_columns", ")", "# Select columns for this channel, so that number of values will", "# be number of bytes per point * number of data points.", "# Then use ravel to flatten the results into a vector.", "object_data", "=", "combined_data", "[", ":", ",", "byte_columns", "]", ".", "ravel", "(", ")", "# Now set correct data type, so that the array length should", "# be correct", "object_data", ".", "dtype", "=", "(", "np", ".", "dtype", "(", "obj", ".", "data_type", ".", "nptype", ")", ".", "newbyteorder", "(", "self", ".", "endianness", ")", ")", "obj", ".", "tdms_object", ".", "_update_data", "(", "object_data", ")", "data_pos", "+=", "obj", ".", "data_type", ".", "size" ]
Read interleaved data where all channels have a numpy type
[ "Read", "interleaved", "data", "where", "all", "channels", "have", "a", "numpy", "type" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L534-L562
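The byte-column trick above can be demonstrated without nptdms at all; this standalone sketch interleaves two little-endian int16 channels and recovers each one by selecting its byte columns and reinterpreting the flattened bytes:

import numpy as np

a = np.array([1, 2, 3], dtype='<i2')
b = np.array([10, 20, 30], dtype='<i2')
interleaved = np.column_stack((a, b)).tobytes()      # a0 b0 a1 b1 a2 b2

combined = np.frombuffer(interleaved, dtype=np.uint8).reshape(-1, 4)
channel_a = combined[:, (0, 1)].ravel().copy()
channel_a.dtype = np.dtype('<i2')
channel_b = combined[:, (2, 3)].ravel().copy()
channel_b.dtype = np.dtype('<i2')
assert list(channel_a) == [1, 2, 3] and list(channel_b) == [10, 20, 30]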
5,555
adamreeve/npTDMS
nptdms/tdms.py
_TdmsSegment._read_interleaved
def _read_interleaved(self, f, data_objects): """Read interleaved data that doesn't have a numpy type""" log.debug("Reading interleaved data point by point") object_data = {} points_added = {} for obj in data_objects: object_data[obj.path] = obj._new_segment_data() points_added[obj.path] = 0 while any([points_added[o.path] < o.number_values for o in data_objects]): for obj in data_objects: if points_added[obj.path] < obj.number_values: object_data[obj.path][points_added[obj.path]] = ( obj._read_value(f)) points_added[obj.path] += 1 for obj in data_objects: obj.tdms_object._update_data(object_data[obj.path])
python
def _read_interleaved(self, f, data_objects): """Read interleaved data that doesn't have a numpy type""" log.debug("Reading interleaved data point by point") object_data = {} points_added = {} for obj in data_objects: object_data[obj.path] = obj._new_segment_data() points_added[obj.path] = 0 while any([points_added[o.path] < o.number_values for o in data_objects]): for obj in data_objects: if points_added[obj.path] < obj.number_values: object_data[obj.path][points_added[obj.path]] = ( obj._read_value(f)) points_added[obj.path] += 1 for obj in data_objects: obj.tdms_object._update_data(object_data[obj.path])
[ "def", "_read_interleaved", "(", "self", ",", "f", ",", "data_objects", ")", ":", "log", ".", "debug", "(", "\"Reading interleaved data point by point\"", ")", "object_data", "=", "{", "}", "points_added", "=", "{", "}", "for", "obj", "in", "data_objects", ":", "object_data", "[", "obj", ".", "path", "]", "=", "obj", ".", "_new_segment_data", "(", ")", "points_added", "[", "obj", ".", "path", "]", "=", "0", "while", "any", "(", "[", "points_added", "[", "o", ".", "path", "]", "<", "o", ".", "number_values", "for", "o", "in", "data_objects", "]", ")", ":", "for", "obj", "in", "data_objects", ":", "if", "points_added", "[", "obj", ".", "path", "]", "<", "obj", ".", "number_values", ":", "object_data", "[", "obj", ".", "path", "]", "[", "points_added", "[", "obj", ".", "path", "]", "]", "=", "(", "obj", ".", "_read_value", "(", "f", ")", ")", "points_added", "[", "obj", ".", "path", "]", "+=", "1", "for", "obj", "in", "data_objects", ":", "obj", ".", "tdms_object", ".", "_update_data", "(", "object_data", "[", "obj", ".", "path", "]", ")" ]
Read interleaved data that doesn't have a numpy type
[ "Read", "interleaved", "data", "that", "doesn", "t", "have", "a", "numpy", "type" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L564-L581
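The record above reads interleaved channels that lack a numpy dtype one value at a time, cycling over the channels until each has received all of its values. A self-contained sketch of the same round-robin pattern using plain struct unpacking on a made-up two-channel buffer (the channel dicts here are stand-ins, not npTDMS objects):

import io
import struct

# Made-up interleaved buffer: each sample is [ch0 int32][ch1 float64]
raw = b"".join(struct.pack("<id", i, i * 0.5) for i in range(4))

channels = [
    {"name": "ch0", "fmt": "<i", "size": 4, "count": 4, "values": []},
    {"name": "ch1", "fmt": "<d", "size": 8, "count": 4, "values": []},
]

f = io.BytesIO(raw)
# Cycle over the channels until every one has read all of its values,
# mirroring the point-by-point loop in _read_interleaved
while any(len(c["values"]) < c["count"] for c in channels):
    for c in channels:
        if len(c["values"]) < c["count"]:
            c["values"].append(struct.unpack(c["fmt"], f.read(c["size"]))[0])

print(channels[0]["values"])   # [0, 1, 2, 3]
print(channels[1]["values"])   # [0.0, 0.5, 1.0, 1.5]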
5,556
adamreeve/npTDMS
nptdms/tdms.py
TdmsObject.time_track
def time_track(self, absolute_time=False, accuracy='ns'): """Return an array of time or the independent variable for this channel This depends on the object having the wf_increment and wf_start_offset properties defined. Note that wf_start_offset is usually zero for time-series data. If you have time-series data channels with different start times, you should use the absolute time or calculate the time offsets using the wf_start_time property. For larger timespans, the accuracy setting should be set lower. The default setting is 'ns', which has a timespan of [1678 AD, 2262 AD]. For the exact ranges, refer to http://docs.scipy.org/doc/numpy/reference/arrays.datetime.html section "Datetime Units". :param absolute_time: Whether the returned time values are absolute times rather than relative to the start time. If true, the wf_start_time property must be set. :param accuracy: The accuracy of the returned datetime64 array. :rtype: NumPy array. :raises: KeyError if required properties aren't found """ try: increment = self.property('wf_increment') offset = self.property('wf_start_offset') except KeyError: raise KeyError("Object does not have time properties available.") periods = len(self._data) relative_time = np.linspace( offset, offset + (periods - 1) * increment, periods) if not absolute_time: return relative_time try: start_time = self.property('wf_start_time') except KeyError: raise KeyError( "Object does not have start time property available.") try: unit_correction = { 's': 1e0, 'ms': 1e3, 'us': 1e6, 'ns': 1e9, }[accuracy] except KeyError: raise KeyError("Invalid accuracy: {0}".format(accuracy)) # Because numpy only knows ints as its date datatype, # convert to accuracy. time_type = "timedelta64[{0}]".format(accuracy) return (np.datetime64(start_time) + (relative_time * unit_correction).astype(time_type))
python
def time_track(self, absolute_time=False, accuracy='ns'): """Return an array of time or the independent variable for this channel This depends on the object having the wf_increment and wf_start_offset properties defined. Note that wf_start_offset is usually zero for time-series data. If you have time-series data channels with different start times, you should use the absolute time or calculate the time offsets using the wf_start_time property. For larger timespans, the accuracy setting should be set lower. The default setting is 'ns', which has a timespan of [1678 AD, 2262 AD]. For the exact ranges, refer to http://docs.scipy.org/doc/numpy/reference/arrays.datetime.html section "Datetime Units". :param absolute_time: Whether the returned time values are absolute times rather than relative to the start time. If true, the wf_start_time property must be set. :param accuracy: The accuracy of the returned datetime64 array. :rtype: NumPy array. :raises: KeyError if required properties aren't found """ try: increment = self.property('wf_increment') offset = self.property('wf_start_offset') except KeyError: raise KeyError("Object does not have time properties available.") periods = len(self._data) relative_time = np.linspace( offset, offset + (periods - 1) * increment, periods) if not absolute_time: return relative_time try: start_time = self.property('wf_start_time') except KeyError: raise KeyError( "Object does not have start time property available.") try: unit_correction = { 's': 1e0, 'ms': 1e3, 'us': 1e6, 'ns': 1e9, }[accuracy] except KeyError: raise KeyError("Invalid accuracy: {0}".format(accuracy)) # Because numpy only knows ints as its date datatype, # convert to accuracy. time_type = "timedelta64[{0}]".format(accuracy) return (np.datetime64(start_time) + (relative_time * unit_correction).astype(time_type))
[ "def", "time_track", "(", "self", ",", "absolute_time", "=", "False", ",", "accuracy", "=", "'ns'", ")", ":", "try", ":", "increment", "=", "self", ".", "property", "(", "'wf_increment'", ")", "offset", "=", "self", ".", "property", "(", "'wf_start_offset'", ")", "except", "KeyError", ":", "raise", "KeyError", "(", "\"Object does not have time properties available.\"", ")", "periods", "=", "len", "(", "self", ".", "_data", ")", "relative_time", "=", "np", ".", "linspace", "(", "offset", ",", "offset", "+", "(", "periods", "-", "1", ")", "*", "increment", ",", "periods", ")", "if", "not", "absolute_time", ":", "return", "relative_time", "try", ":", "start_time", "=", "self", ".", "property", "(", "'wf_start_time'", ")", "except", "KeyError", ":", "raise", "KeyError", "(", "\"Object does not have start time property available.\"", ")", "try", ":", "unit_correction", "=", "{", "'s'", ":", "1e0", ",", "'ms'", ":", "1e3", ",", "'us'", ":", "1e6", ",", "'ns'", ":", "1e9", ",", "}", "[", "accuracy", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "\"Invalid accuracy: {0}\"", ".", "format", "(", "accuracy", ")", ")", "# Because numpy only knows ints as its date datatype,", "# convert to accuracy.", "time_type", "=", "\"timedelta64[{0}]\"", ".", "format", "(", "accuracy", ")", "return", "(", "np", ".", "datetime64", "(", "start_time", ")", "+", "(", "relative_time", "*", "unit_correction", ")", ".", "astype", "(", "time_type", ")", ")" ]
Return an array of time or the independent variable for this channel This depends on the object having the wf_increment and wf_start_offset properties defined. Note that wf_start_offset is usually zero for time-series data. If you have time-series data channels with different start times, you should use the absolute time or calculate the time offsets using the wf_start_time property. For larger timespans, the accuracy setting should be set lower. The default setting is 'ns', which has a timespan of [1678 AD, 2262 AD]. For the exact ranges, refer to http://docs.scipy.org/doc/numpy/reference/arrays.datetime.html section "Datetime Units". :param absolute_time: Whether the returned time values are absolute times rather than relative to the start time. If true, the wf_start_time property must be set. :param accuracy: The accuracy of the returned datetime64 array. :rtype: NumPy array. :raises: KeyError if required properties aren't found
[ "Return", "an", "array", "of", "time", "or", "the", "independent", "variable", "for", "this", "channel" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L645-L706
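time_track derives the channel's time axis from the wf_increment and wf_start_offset waveform properties, and can shift it onto an absolute datetime64 axis using wf_start_time. A short standalone reproduction of that arithmetic with made-up property values:

import numpy as np

# Made-up waveform properties and sample count
increment = 0.001        # wf_increment, seconds between samples
offset = 0.0             # wf_start_offset
n = 5                    # number of samples in the channel

# Relative time track, as in time_track(absolute_time=False)
relative = np.linspace(offset, offset + (n - 1) * increment, n)

# Absolute time track: scale to the requested unit and add to the start time,
# as in time_track(absolute_time=True, accuracy='us')
start = np.datetime64("2024-01-01T00:00:00")
absolute = start + (relative * 1e6).astype("timedelta64[us]")

print(relative)                   # [0.    0.001 0.002 0.003 0.004]
print(absolute[0], absolute[-1])  # first and last absolute timestamps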
5,557
adamreeve/npTDMS
nptdms/tdms.py
TdmsObject._initialise_data
def _initialise_data(self, memmap_dir=None): """Initialise data array to zeros""" if self.number_values == 0: pass elif self.data_type.nptype is None: self._data = [] else: if memmap_dir: memmap_file = tempfile.NamedTemporaryFile( mode='w+b', prefix="nptdms_", dir=memmap_dir) self._data = np.memmap( memmap_file.file, mode='w+', shape=(self.number_values,), dtype=self.data_type.nptype) else: self._data = np.zeros( self.number_values, dtype=self.data_type.nptype) self._data_insert_position = 0 if self._data is not None: log.debug("Allocated %d sample slots for %s", len(self._data), self.path) else: log.debug("Allocated no space for %s", self.path)
python
def _initialise_data(self, memmap_dir=None): """Initialise data array to zeros""" if self.number_values == 0: pass elif self.data_type.nptype is None: self._data = [] else: if memmap_dir: memmap_file = tempfile.NamedTemporaryFile( mode='w+b', prefix="nptdms_", dir=memmap_dir) self._data = np.memmap( memmap_file.file, mode='w+', shape=(self.number_values,), dtype=self.data_type.nptype) else: self._data = np.zeros( self.number_values, dtype=self.data_type.nptype) self._data_insert_position = 0 if self._data is not None: log.debug("Allocated %d sample slots for %s", len(self._data), self.path) else: log.debug("Allocated no space for %s", self.path)
[ "def", "_initialise_data", "(", "self", ",", "memmap_dir", "=", "None", ")", ":", "if", "self", ".", "number_values", "==", "0", ":", "pass", "elif", "self", ".", "data_type", ".", "nptype", "is", "None", ":", "self", ".", "_data", "=", "[", "]", "else", ":", "if", "memmap_dir", ":", "memmap_file", "=", "tempfile", ".", "NamedTemporaryFile", "(", "mode", "=", "'w+b'", ",", "prefix", "=", "\"nptdms_\"", ",", "dir", "=", "memmap_dir", ")", "self", ".", "_data", "=", "np", ".", "memmap", "(", "memmap_file", ".", "file", ",", "mode", "=", "'w+'", ",", "shape", "=", "(", "self", ".", "number_values", ",", ")", ",", "dtype", "=", "self", ".", "data_type", ".", "nptype", ")", "else", ":", "self", ".", "_data", "=", "np", ".", "zeros", "(", "self", ".", "number_values", ",", "dtype", "=", "self", ".", "data_type", ".", "nptype", ")", "self", ".", "_data_insert_position", "=", "0", "if", "self", ".", "_data", "is", "not", "None", ":", "log", ".", "debug", "(", "\"Allocated %d sample slots for %s\"", ",", "len", "(", "self", ".", "_data", ")", ",", "self", ".", "path", ")", "else", ":", "log", ".", "debug", "(", "\"Allocated no space for %s\"", ",", "self", ".", "path", ")" ]
Initialise data array to zeros
[ "Initialise", "data", "array", "to", "zeros" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L708-L732
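_initialise_data pre-allocates the channel array either in memory with np.zeros or, when a memmap_dir is given, as a temporary memory-mapped file on disk. A minimal sketch of that choice (the helper name and temp-file prefix are illustrative):

import tempfile
import numpy as np

def allocate(n_values, dtype, memmap_dir=None):
    """Pre-allocate zeros, optionally backed by a temporary file on disk."""
    if memmap_dir:
        tmp = tempfile.NamedTemporaryFile(mode="w+b", prefix="nptdms_", dir=memmap_dir)
        # The temp file object must stay referenced for as long as the array is used
        return np.memmap(tmp.file, mode="w+", shape=(n_values,), dtype=dtype), tmp
    return np.zeros(n_values, dtype=dtype), None

data, backing = allocate(1000, np.float64)              # in-memory
# data, backing = allocate(1000, np.float64, "/tmp")    # disk-backed memmap
print(data.shape, data.dtype)   # (1000,) float64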
5,558
adamreeve/npTDMS
nptdms/tdms.py
TdmsObject._update_data
def _update_data(self, new_data): """Update the object data with a new array of data""" log.debug("Adding %d data points to data for %s" % (len(new_data), self.path)) if self._data is None: self._data = new_data else: if self.data_type.nptype is not None: data_pos = ( self._data_insert_position, self._data_insert_position + len(new_data)) self._data_insert_position += len(new_data) self._data[data_pos[0]:data_pos[1]] = new_data else: self._data.extend(new_data)
python
def _update_data(self, new_data): """Update the object data with a new array of data""" log.debug("Adding %d data points to data for %s" % (len(new_data), self.path)) if self._data is None: self._data = new_data else: if self.data_type.nptype is not None: data_pos = ( self._data_insert_position, self._data_insert_position + len(new_data)) self._data_insert_position += len(new_data) self._data[data_pos[0]:data_pos[1]] = new_data else: self._data.extend(new_data)
[ "def", "_update_data", "(", "self", ",", "new_data", ")", ":", "log", ".", "debug", "(", "\"Adding %d data points to data for %s\"", "%", "(", "len", "(", "new_data", ")", ",", "self", ".", "path", ")", ")", "if", "self", ".", "_data", "is", "None", ":", "self", ".", "_data", "=", "new_data", "else", ":", "if", "self", ".", "data_type", ".", "nptype", "is", "not", "None", ":", "data_pos", "=", "(", "self", ".", "_data_insert_position", ",", "self", ".", "_data_insert_position", "+", "len", "(", "new_data", ")", ")", "self", ".", "_data_insert_position", "+=", "len", "(", "new_data", ")", "self", ".", "_data", "[", "data_pos", "[", "0", "]", ":", "data_pos", "[", "1", "]", "]", "=", "new_data", "else", ":", "self", ".", "_data", ".", "extend", "(", "new_data", ")" ]
Update the object data with a new array of data
[ "Update", "the", "object", "data", "with", "a", "new", "array", "of", "data" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L734-L749
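_update_data copies each newly read chunk into the next free slice of the pre-allocated array, tracking a running insert position. The same pattern in isolation, with a small stand-in class:

import numpy as np

class SegmentBuffer:
    """Pre-allocated buffer that chunks of segment data are copied into."""
    def __init__(self, total, dtype):
        self.data = np.zeros(total, dtype=dtype)
        self._pos = 0

    def update(self, new_data):
        # Copy the chunk into the next free slice, as _update_data does
        self.data[self._pos:self._pos + len(new_data)] = new_data
        self._pos += len(new_data)

buf = SegmentBuffer(10, np.int32)
buf.update(np.arange(4))         # first segment's values
buf.update(np.arange(4, 10))     # second segment's values
print(buf.data)                  # [0 1 2 3 4 5 6 7 8 9]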
5,559
adamreeve/npTDMS
nptdms/tdms.py
TdmsObject.as_dataframe
def as_dataframe(self, absolute_time=False): """ Converts the TDMS object to a DataFrame :param absolute_time: Whether times should be absolute rather than relative to the start time. :return: The TDMS object data. :rtype: pandas.DataFrame """ import pandas as pd # When absolute_time is True, # use the wf_start_time as offset for the time_track() try: time = self.time_track(absolute_time) except KeyError: time = None if self.channel is None: return pd.DataFrame.from_items( [(ch.channel, pd.Series(ch.data)) for ch in self.tdms_file.group_channels(self.group)]) else: return pd.DataFrame(self._data, index=time, columns=[self.path])
python
def as_dataframe(self, absolute_time=False): """ Converts the TDMS object to a DataFrame :param absolute_time: Whether times should be absolute rather than relative to the start time. :return: The TDMS object data. :rtype: pandas.DataFrame """ import pandas as pd # When absolute_time is True, # use the wf_start_time as offset for the time_track() try: time = self.time_track(absolute_time) except KeyError: time = None if self.channel is None: return pd.DataFrame.from_items( [(ch.channel, pd.Series(ch.data)) for ch in self.tdms_file.group_channels(self.group)]) else: return pd.DataFrame(self._data, index=time, columns=[self.path])
[ "def", "as_dataframe", "(", "self", ",", "absolute_time", "=", "False", ")", ":", "import", "pandas", "as", "pd", "# When absolute_time is True,", "# use the wf_start_time as offset for the time_track()", "try", ":", "time", "=", "self", ".", "time_track", "(", "absolute_time", ")", "except", "KeyError", ":", "time", "=", "None", "if", "self", ".", "channel", "is", "None", ":", "return", "pd", ".", "DataFrame", ".", "from_items", "(", "[", "(", "ch", ".", "channel", ",", "pd", ".", "Series", "(", "ch", ".", "data", ")", ")", "for", "ch", "in", "self", ".", "tdms_file", ".", "group_channels", "(", "self", ".", "group", ")", "]", ")", "else", ":", "return", "pd", ".", "DataFrame", "(", "self", ".", "_data", ",", "index", "=", "time", ",", "columns", "=", "[", "self", ".", "path", "]", ")" ]
Converts the TDMS object to a DataFrame :param absolute_time: Whether times should be absolute rather than relative to the start time. :return: The TDMS object data. :rtype: pandas.DataFrame
[ "Converts", "the", "TDMS", "object", "to", "a", "DataFrame" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L751-L774
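Note that pd.DataFrame.from_items, used for the group-level case above, has been removed in newer pandas releases; a roughly equivalent construction with from_dict (channel names and values below are made up) would be:

import pandas as pd

# Stand-ins for the channels of a group; in the record above these come from
# tdms_file.group_channels(self.group)
channels = {"channel_1": [1.0, 2.0, 3.0], "channel_2": [4.0, 5.0, 6.0]}

df = pd.DataFrame.from_dict(
    {name: pd.Series(values) for name, values in channels.items()}
)
print(df)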
5,560
adamreeve/npTDMS
nptdms/tdms.py
TdmsObject.data
def data(self): """ NumPy array containing data if there is data for this object, otherwise None. """ if self._data is None: # self._data is None if data segment is empty return np.empty((0, 1)) if self._data_scaled is None: scale = scaling.get_scaling(self) if scale is None: self._data_scaled = self._data else: self._data_scaled = scale.scale(self._data) return self._data_scaled
python
def data(self): """ NumPy array containing data if there is data for this object, otherwise None. """ if self._data is None: # self._data is None if data segment is empty return np.empty((0, 1)) if self._data_scaled is None: scale = scaling.get_scaling(self) if scale is None: self._data_scaled = self._data else: self._data_scaled = scale.scale(self._data) return self._data_scaled
[ "def", "data", "(", "self", ")", ":", "if", "self", ".", "_data", "is", "None", ":", "# self._data is None if data segment is empty", "return", "np", ".", "empty", "(", "(", "0", ",", "1", ")", ")", "if", "self", ".", "_data_scaled", "is", "None", ":", "scale", "=", "scaling", ".", "get_scaling", "(", "self", ")", "if", "scale", "is", "None", ":", "self", ".", "_data_scaled", "=", "self", ".", "_data", "else", ":", "self", ".", "_data_scaled", "=", "scale", ".", "scale", "(", "self", ".", "_data", ")", "return", "self", ".", "_data_scaled" ]
NumPy array containing data if there is data for this object, otherwise None.
[ "NumPy", "array", "containing", "data", "if", "there", "is", "data", "for", "this", "object", "otherwise", "None", "." ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L777-L792
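The data property applies any scaling lazily and caches the result, returning an empty array when no data was read for the object. A minimal cached-scaling sketch; the linear (slope, intercept) scaling here is a stand-in for whatever nptdms.scaling would return:

import numpy as np

class Channel:
    def __init__(self, raw, scale=None):
        self._raw = raw              # raw data as read from the file, or None
        self._scale = scale          # stand-in scaling, e.g. (slope, intercept)
        self._scaled = None          # cache for the scaled data

    @property
    def data(self):
        if self._raw is None:
            return np.empty((0, 1))  # empty segment, mirroring the property above
        if self._scaled is None:
            if self._scale is None:
                self._scaled = self._raw
            else:
                slope, intercept = self._scale
                self._scaled = self._raw * slope + intercept
        return self._scaled

ch = Channel(np.arange(5, dtype=np.float64), scale=(2.0, 1.0))
print(ch.data)   # [1. 3. 5. 7. 9.]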
5,561
adamreeve/npTDMS
nptdms/tdms.py
_TdmsmxDAQMetadata._read_metadata
def _read_metadata(self, f, endianness): """ Read the metadata for a DAQmx raw segment. This is the raw DAQmx-specific portion of the raw data index. """ self.data_type = types.tds_data_types[0xFFFFFFFF] self.dimension = types.Uint32.read(f, endianness) # In TDMS format version 2.0, 1 is the only valid value for dimension if self.dimension != 1: log.warning("Data dimension is not 1") self.chunk_size = types.Uint64.read(f, endianness) # size of vector of format changing scalers self.scaler_vector_length = types.Uint32.read(f, endianness) # Size of the vector log.debug("mxDAQ format scaler vector size '%d'" % (self.scaler_vector_length,)) if self.scaler_vector_length > 1: log.error("mxDAQ multiple format changing scalers not implemented") for idx in range(self.scaler_vector_length): # WARNING: This code overwrites previous values with new # values. At this time NI provides no documentation on # how to use these scalers and sample TDMS files do not # include more than one of these scalers. self.scaler_data_type_code = types.Uint32.read(f, endianness) self.scaler_data_type = ( types.tds_data_types[self.scaler_data_type_code]) # more info for format changing scaler self.scaler_raw_buffer_index = types.Uint32.read(f, endianness) self.scaler_raw_byte_offset = types.Uint32.read(f, endianness) self.scaler_sample_format_bitmap = types.Uint32.read(f, endianness) self.scale_id = types.Uint32.read(f, endianness) raw_data_widths_length = types.Uint32.read(f, endianness) self.raw_data_widths = np.zeros(raw_data_widths_length, dtype=np.int32) for cnt in range(raw_data_widths_length): self.raw_data_widths[cnt] = types.Uint32.read(f, endianness)
python
def _read_metadata(self, f, endianness): """ Read the metadata for a DAQmx raw segment. This is the raw DAQmx-specific portion of the raw data index. """ self.data_type = types.tds_data_types[0xFFFFFFFF] self.dimension = types.Uint32.read(f, endianness) # In TDMS format version 2.0, 1 is the only valid value for dimension if self.dimension != 1: log.warning("Data dimension is not 1") self.chunk_size = types.Uint64.read(f, endianness) # size of vector of format changing scalers self.scaler_vector_length = types.Uint32.read(f, endianness) # Size of the vector log.debug("mxDAQ format scaler vector size '%d'" % (self.scaler_vector_length,)) if self.scaler_vector_length > 1: log.error("mxDAQ multiple format changing scalers not implemented") for idx in range(self.scaler_vector_length): # WARNING: This code overwrites previous values with new # values. At this time NI provides no documentation on # how to use these scalers and sample TDMS files do not # include more than one of these scalers. self.scaler_data_type_code = types.Uint32.read(f, endianness) self.scaler_data_type = ( types.tds_data_types[self.scaler_data_type_code]) # more info for format changing scaler self.scaler_raw_buffer_index = types.Uint32.read(f, endianness) self.scaler_raw_byte_offset = types.Uint32.read(f, endianness) self.scaler_sample_format_bitmap = types.Uint32.read(f, endianness) self.scale_id = types.Uint32.read(f, endianness) raw_data_widths_length = types.Uint32.read(f, endianness) self.raw_data_widths = np.zeros(raw_data_widths_length, dtype=np.int32) for cnt in range(raw_data_widths_length): self.raw_data_widths[cnt] = types.Uint32.read(f, endianness)
[ "def", "_read_metadata", "(", "self", ",", "f", ",", "endianness", ")", ":", "self", ".", "data_type", "=", "types", ".", "tds_data_types", "[", "0xFFFFFFFF", "]", "self", ".", "dimension", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "endianness", ")", "# In TDMS format version 2.0, 1 is the only valid value for dimension", "if", "self", ".", "dimension", "!=", "1", ":", "log", ".", "warning", "(", "\"Data dimension is not 1\"", ")", "self", ".", "chunk_size", "=", "types", ".", "Uint64", ".", "read", "(", "f", ",", "endianness", ")", "# size of vector of format changing scalers", "self", ".", "scaler_vector_length", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "endianness", ")", "# Size of the vector", "log", ".", "debug", "(", "\"mxDAQ format scaler vector size '%d'\"", "%", "(", "self", ".", "scaler_vector_length", ",", ")", ")", "if", "self", ".", "scaler_vector_length", ">", "1", ":", "log", ".", "error", "(", "\"mxDAQ multiple format changing scalers not implemented\"", ")", "for", "idx", "in", "range", "(", "self", ".", "scaler_vector_length", ")", ":", "# WARNING: This code overwrites previous values with new", "# values. At this time NI provides no documentation on", "# how to use these scalers and sample TDMS files do not", "# include more than one of these scalers.", "self", ".", "scaler_data_type_code", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "endianness", ")", "self", ".", "scaler_data_type", "=", "(", "types", ".", "tds_data_types", "[", "self", ".", "scaler_data_type_code", "]", ")", "# more info for format changing scaler", "self", ".", "scaler_raw_buffer_index", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "endianness", ")", "self", ".", "scaler_raw_byte_offset", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "endianness", ")", "self", ".", "scaler_sample_format_bitmap", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "endianness", ")", "self", ".", "scale_id", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "endianness", ")", "raw_data_widths_length", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "endianness", ")", "self", ".", "raw_data_widths", "=", "np", ".", "zeros", "(", "raw_data_widths_length", ",", "dtype", "=", "np", ".", "int32", ")", "for", "cnt", "in", "range", "(", "raw_data_widths_length", ")", ":", "self", ".", "raw_data_widths", "[", "cnt", "]", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "endianness", ")" ]
Read the metadata for a DAQmx raw segment. This is the raw DAQmx-specific portion of the raw data index.
[ "Read", "the", "metadata", "for", "a", "DAQmx", "raw", "segment", ".", "This", "is", "the", "raw", "DAQmx", "-", "specific", "portion", "of", "the", "raw", "data", "index", "." ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L832-L869
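The DAQmx metadata reader above walks a sequence of fixed-width unsigned integers in the segment's byte order. A standalone illustration of reading such fields with struct; the field layout in this buffer is simplified and does not reproduce the full DAQmx raw data index:

import io
import struct

def read_u32(f, endianness="<"):
    return struct.unpack(endianness + "I", f.read(4))[0]

def read_u64(f, endianness="<"):
    return struct.unpack(endianness + "Q", f.read(8))[0]

# Made-up little-endian metadata: dimension, chunk size, scaler vector length, one raw width
buf = io.BytesIO(struct.pack("<IQII", 1, 1000, 1, 2))

dimension = read_u32(buf)              # 1
chunk_size = read_u64(buf)             # 1000
scaler_vector_length = read_u32(buf)   # 1
raw_data_width = read_u32(buf)         # 2
print(dimension, chunk_size, scaler_vector_length, raw_data_width)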
5,562
adamreeve/npTDMS
nptdms/tdms.py
_TdmsSegmentObject._read_metadata
def _read_metadata(self, f): """Read object metadata and update object information""" raw_data_index = types.Uint32.read(f, self.endianness) log.debug("Reading metadata for object %s", self.tdms_object.path) # Object has no data in this segment if raw_data_index == 0xFFFFFFFF: log.debug("Object has no data in this segment") self.has_data = False # Leave number_values and data_size as set previously, # as these may be re-used by later segments. # Data has same structure as previously elif raw_data_index == 0x00000000: log.debug( "Object has same data structure as in the previous segment") self.has_data = True elif raw_data_index == 0x00001269 or raw_data_index == 0x00001369: # This is a DAQmx raw data segment. # 0x00001269 for segment containing Format Changing scaler. # 0x00001369 for segment containing Digital Line scaler. if raw_data_index == 0x00001369: # special scaling for DAQ's digital input lines? log.warning("DAQmx with Digital Line scaler has not tested") # DAQmx raw data format metadata has its own class self.has_data = True self.tdms_object.has_data = True info = self._read_metadata_mx(f) self.dimension = info.dimension self.data_type = info.data_type # DAQmx format has special chunking self.data_size = info.chunk_size self.number_values = info.chunk_size # segment reading code relies on a single consistent raw # data width so assert that there is only one. assert(len(info.raw_data_widths) == 1) self.raw_data_width = info.raw_data_widths[0] # fall through and read properties else: # Assume metadata format is legacy TDMS format. # raw_data_index gives the length of the index information. self.has_data = True self.tdms_object.has_data = True # Read the data type try: self.data_type = types.tds_data_types[ types.Uint32.read(f, self.endianness)] except KeyError: raise KeyError("Unrecognised data type") if (self.tdms_object.data_type is not None and self.data_type != self.tdms_object.data_type): raise ValueError( "Segment object doesn't have the same data " "type as previous segments.") else: self.tdms_object.data_type = self.data_type log.debug("Object data type: %r", self.tdms_object.data_type) if (self.tdms_object.data_type.size is None and self.tdms_object.data_type != types.String): raise ValueError( "Unsupported data type: %r" % self.tdms_object.data_type) # Read data dimension self.dimension = types.Uint32.read(f, self.endianness) # In TDMS version 2.0, 1 is the only valid value for dimension if self.dimension != 1: log.warning("Data dimension is not 1") # Read number of values self.number_values = types.Uint64.read(f, self.endianness) # Variable length data types have total size if self.data_type in (types.String, ): self.data_size = types.Uint64.read(f, self.endianness) else: self.data_size = ( self.number_values * self.data_type.size * self.dimension) log.debug( "Object number of values in segment: %d", self.number_values) # Read data properties num_properties = types.Uint32.read(f, self.endianness) log.debug("Reading %d properties", num_properties) for i in range(num_properties): prop_name, value = read_property(f, self.endianness) self.tdms_object.properties[prop_name] = value
python
def _read_metadata(self, f): """Read object metadata and update object information""" raw_data_index = types.Uint32.read(f, self.endianness) log.debug("Reading metadata for object %s", self.tdms_object.path) # Object has no data in this segment if raw_data_index == 0xFFFFFFFF: log.debug("Object has no data in this segment") self.has_data = False # Leave number_values and data_size as set previously, # as these may be re-used by later segments. # Data has same structure as previously elif raw_data_index == 0x00000000: log.debug( "Object has same data structure as in the previous segment") self.has_data = True elif raw_data_index == 0x00001269 or raw_data_index == 0x00001369: # This is a DAQmx raw data segment. # 0x00001269 for segment containing Format Changing scaler. # 0x00001369 for segment containing Digital Line scaler. if raw_data_index == 0x00001369: # special scaling for DAQ's digital input lines? log.warning("DAQmx with Digital Line scaler has not tested") # DAQmx raw data format metadata has its own class self.has_data = True self.tdms_object.has_data = True info = self._read_metadata_mx(f) self.dimension = info.dimension self.data_type = info.data_type # DAQmx format has special chunking self.data_size = info.chunk_size self.number_values = info.chunk_size # segment reading code relies on a single consistent raw # data width so assert that there is only one. assert(len(info.raw_data_widths) == 1) self.raw_data_width = info.raw_data_widths[0] # fall through and read properties else: # Assume metadata format is legacy TDMS format. # raw_data_index gives the length of the index information. self.has_data = True self.tdms_object.has_data = True # Read the data type try: self.data_type = types.tds_data_types[ types.Uint32.read(f, self.endianness)] except KeyError: raise KeyError("Unrecognised data type") if (self.tdms_object.data_type is not None and self.data_type != self.tdms_object.data_type): raise ValueError( "Segment object doesn't have the same data " "type as previous segments.") else: self.tdms_object.data_type = self.data_type log.debug("Object data type: %r", self.tdms_object.data_type) if (self.tdms_object.data_type.size is None and self.tdms_object.data_type != types.String): raise ValueError( "Unsupported data type: %r" % self.tdms_object.data_type) # Read data dimension self.dimension = types.Uint32.read(f, self.endianness) # In TDMS version 2.0, 1 is the only valid value for dimension if self.dimension != 1: log.warning("Data dimension is not 1") # Read number of values self.number_values = types.Uint64.read(f, self.endianness) # Variable length data types have total size if self.data_type in (types.String, ): self.data_size = types.Uint64.read(f, self.endianness) else: self.data_size = ( self.number_values * self.data_type.size * self.dimension) log.debug( "Object number of values in segment: %d", self.number_values) # Read data properties num_properties = types.Uint32.read(f, self.endianness) log.debug("Reading %d properties", num_properties) for i in range(num_properties): prop_name, value = read_property(f, self.endianness) self.tdms_object.properties[prop_name] = value
[ "def", "_read_metadata", "(", "self", ",", "f", ")", ":", "raw_data_index", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "self", ".", "endianness", ")", "log", ".", "debug", "(", "\"Reading metadata for object %s\"", ",", "self", ".", "tdms_object", ".", "path", ")", "# Object has no data in this segment", "if", "raw_data_index", "==", "0xFFFFFFFF", ":", "log", ".", "debug", "(", "\"Object has no data in this segment\"", ")", "self", ".", "has_data", "=", "False", "# Leave number_values and data_size as set previously,", "# as these may be re-used by later segments.", "# Data has same structure as previously", "elif", "raw_data_index", "==", "0x00000000", ":", "log", ".", "debug", "(", "\"Object has same data structure as in the previous segment\"", ")", "self", ".", "has_data", "=", "True", "elif", "raw_data_index", "==", "0x00001269", "or", "raw_data_index", "==", "0x00001369", ":", "# This is a DAQmx raw data segment.", "# 0x00001269 for segment containing Format Changing scaler.", "# 0x00001369 for segment containing Digital Line scaler.", "if", "raw_data_index", "==", "0x00001369", ":", "# special scaling for DAQ's digital input lines?", "log", ".", "warning", "(", "\"DAQmx with Digital Line scaler has not tested\"", ")", "# DAQmx raw data format metadata has its own class", "self", ".", "has_data", "=", "True", "self", ".", "tdms_object", ".", "has_data", "=", "True", "info", "=", "self", ".", "_read_metadata_mx", "(", "f", ")", "self", ".", "dimension", "=", "info", ".", "dimension", "self", ".", "data_type", "=", "info", ".", "data_type", "# DAQmx format has special chunking", "self", ".", "data_size", "=", "info", ".", "chunk_size", "self", ".", "number_values", "=", "info", ".", "chunk_size", "# segment reading code relies on a single consistent raw", "# data width so assert that there is only one.", "assert", "(", "len", "(", "info", ".", "raw_data_widths", ")", "==", "1", ")", "self", ".", "raw_data_width", "=", "info", ".", "raw_data_widths", "[", "0", "]", "# fall through and read properties", "else", ":", "# Assume metadata format is legacy TDMS format.", "# raw_data_index gives the length of the index information.", "self", ".", "has_data", "=", "True", "self", ".", "tdms_object", ".", "has_data", "=", "True", "# Read the data type", "try", ":", "self", ".", "data_type", "=", "types", ".", "tds_data_types", "[", "types", ".", "Uint32", ".", "read", "(", "f", ",", "self", ".", "endianness", ")", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "\"Unrecognised data type\"", ")", "if", "(", "self", ".", "tdms_object", ".", "data_type", "is", "not", "None", "and", "self", ".", "data_type", "!=", "self", ".", "tdms_object", ".", "data_type", ")", ":", "raise", "ValueError", "(", "\"Segment object doesn't have the same data \"", "\"type as previous segments.\"", ")", "else", ":", "self", ".", "tdms_object", ".", "data_type", "=", "self", ".", "data_type", "log", ".", "debug", "(", "\"Object data type: %r\"", ",", "self", ".", "tdms_object", ".", "data_type", ")", "if", "(", "self", ".", "tdms_object", ".", "data_type", ".", "size", "is", "None", "and", "self", ".", "tdms_object", ".", "data_type", "!=", "types", ".", "String", ")", ":", "raise", "ValueError", "(", "\"Unsupported data type: %r\"", "%", "self", ".", "tdms_object", ".", "data_type", ")", "# Read data dimension", "self", ".", "dimension", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "self", ".", "endianness", ")", "# In TDMS version 2.0, 1 is the only valid value for dimension", 
"if", "self", ".", "dimension", "!=", "1", ":", "log", ".", "warning", "(", "\"Data dimension is not 1\"", ")", "# Read number of values", "self", ".", "number_values", "=", "types", ".", "Uint64", ".", "read", "(", "f", ",", "self", ".", "endianness", ")", "# Variable length data types have total size", "if", "self", ".", "data_type", "in", "(", "types", ".", "String", ",", ")", ":", "self", ".", "data_size", "=", "types", ".", "Uint64", ".", "read", "(", "f", ",", "self", ".", "endianness", ")", "else", ":", "self", ".", "data_size", "=", "(", "self", ".", "number_values", "*", "self", ".", "data_type", ".", "size", "*", "self", ".", "dimension", ")", "log", ".", "debug", "(", "\"Object number of values in segment: %d\"", ",", "self", ".", "number_values", ")", "# Read data properties", "num_properties", "=", "types", ".", "Uint32", ".", "read", "(", "f", ",", "self", ".", "endianness", ")", "log", ".", "debug", "(", "\"Reading %d properties\"", ",", "num_properties", ")", "for", "i", "in", "range", "(", "num_properties", ")", ":", "prop_name", ",", "value", "=", "read_property", "(", "f", ",", "self", ".", "endianness", ")", "self", ".", "tdms_object", ".", "properties", "[", "prop_name", "]", "=", "value" ]
Read object metadata and update object information
[ "Read", "object", "metadata", "and", "update", "object", "information" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L919-L1011
5,563
adamreeve/npTDMS
nptdms/tdms.py
_TdmsSegmentObject._read_value
def _read_value(self, file): """Read a single value from the given file""" if self.data_type.nptype is not None: dtype = (np.dtype(self.data_type.nptype).newbyteorder( self.endianness)) return fromfile(file, dtype=dtype, count=1) return self.data_type.read(file, self.endianness)
python
def _read_value(self, file): """Read a single value from the given file""" if self.data_type.nptype is not None: dtype = (np.dtype(self.data_type.nptype).newbyteorder( self.endianness)) return fromfile(file, dtype=dtype, count=1) return self.data_type.read(file, self.endianness)
[ "def", "_read_value", "(", "self", ",", "file", ")", ":", "if", "self", ".", "data_type", ".", "nptype", "is", "not", "None", ":", "dtype", "=", "(", "np", ".", "dtype", "(", "self", ".", "data_type", ".", "nptype", ")", ".", "newbyteorder", "(", "self", ".", "endianness", ")", ")", "return", "fromfile", "(", "file", ",", "dtype", "=", "dtype", ",", "count", "=", "1", ")", "return", "self", ".", "data_type", ".", "read", "(", "file", ",", "self", ".", "endianness", ")" ]
Read a single value from the given file
[ "Read", "a", "single", "value", "from", "the", "given", "file" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L1017-L1024
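_read_value (and _read_values below) builds a NumPy dtype carrying the segment's byte order and reads directly from the file. The same idea in isolation, with np.frombuffer standing in for the file read so the snippet runs without a file:

import numpy as np

# Four big-endian int32 values, as they might appear in a segment's raw data
raw = (1024).to_bytes(4, "big") * 4

# Build a dtype with an explicit byte order ('>' big-endian, '<' little-endian)
dtype = np.dtype(np.int32).newbyteorder(">")

values = np.frombuffer(raw, dtype=dtype, count=4)
print(values)   # [1024 1024 1024 1024]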
5,564
adamreeve/npTDMS
nptdms/tdms.py
_TdmsSegmentObject._read_values
def _read_values(self, file, number_values): """Read all values for this object from a contiguous segment""" if self.data_type.nptype is not None: dtype = (np.dtype(self.data_type.nptype).newbyteorder( self.endianness)) return fromfile(file, dtype=dtype, count=number_values) elif self.data_type == types.String: return read_string_data(file, number_values, self.endianness) data = self._new_segment_data() for i in range(number_values): data[i] = self.data_type.read(file, self.endianness) return data
python
def _read_values(self, file, number_values): """Read all values for this object from a contiguous segment""" if self.data_type.nptype is not None: dtype = (np.dtype(self.data_type.nptype).newbyteorder( self.endianness)) return fromfile(file, dtype=dtype, count=number_values) elif self.data_type == types.String: return read_string_data(file, number_values, self.endianness) data = self._new_segment_data() for i in range(number_values): data[i] = self.data_type.read(file, self.endianness) return data
[ "def", "_read_values", "(", "self", ",", "file", ",", "number_values", ")", ":", "if", "self", ".", "data_type", ".", "nptype", "is", "not", "None", ":", "dtype", "=", "(", "np", ".", "dtype", "(", "self", ".", "data_type", ".", "nptype", ")", ".", "newbyteorder", "(", "self", ".", "endianness", ")", ")", "return", "fromfile", "(", "file", ",", "dtype", "=", "dtype", ",", "count", "=", "number_values", ")", "elif", "self", ".", "data_type", "==", "types", ".", "String", ":", "return", "read_string_data", "(", "file", ",", "number_values", ",", "self", ".", "endianness", ")", "data", "=", "self", ".", "_new_segment_data", "(", ")", "for", "i", "in", "range", "(", "number_values", ")", ":", "data", "[", "i", "]", "=", "self", ".", "data_type", ".", "read", "(", "file", ",", "self", ".", "endianness", ")", "return", "data" ]
Read all values for this object from a contiguous segment
[ "Read", "all", "values", "for", "this", "object", "from", "a", "contiguous", "segment" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L1026-L1038
5,565
adamreeve/npTDMS
nptdms/tdms.py
_TdmsSegmentObject._new_segment_data
def _new_segment_data(self): """Return a new array to read the data of the current section into""" if self.data_type.nptype is not None: return np.zeros(self.number_values, dtype=self.data_type.nptype) else: return [None] * self.number_values
python
def _new_segment_data(self): """Return a new array to read the data of the current section into""" if self.data_type.nptype is not None: return np.zeros(self.number_values, dtype=self.data_type.nptype) else: return [None] * self.number_values
[ "def", "_new_segment_data", "(", "self", ")", ":", "if", "self", ".", "data_type", ".", "nptype", "is", "not", "None", ":", "return", "np", ".", "zeros", "(", "self", ".", "number_values", ",", "dtype", "=", "self", ".", "data_type", ".", "nptype", ")", "else", ":", "return", "[", "None", "]", "*", "self", ".", "number_values" ]
Return a new array to read the data of the current section into
[ "Return", "a", "new", "array", "to", "read", "the", "data", "of", "the", "current", "section", "into" ]
d7d6632d4ebc2e78ed941477c2f1c56bd7493d74
https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L1040-L1046
5,566
apriha/lineage
src/lineage/snps.py
detect_build
def detect_build(snps): """ Detect build of SNPs. Use the coordinates of common SNPs to identify the build / assembly of a genotype file that is being loaded. Notes ----- rs3094315 : plus strand in 36, 37, and 38 rs11928389 : plus strand in 36, minus strand in 37 and 38 rs2500347 : plus strand in 36 and 37, minus strand in 38 rs964481 : plus strand in 36, 37, and 38 rs2341354 : plus strand in 36, 37, and 38 Parameters ---------- snps : pandas.DataFrame SNPs to add Returns ------- int detected build of SNPs, else None References ---------- ..[1] Yates et. al. (doi:10.1093/bioinformatics/btu613), http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613 ..[2] Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098 ..[3] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K. dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;29(1):308-11. ..[4] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center for Biotechnology Information, National Library of Medicine. dbSNP accession: rs3094315, rs11928389, rs2500347, rs964481, and rs2341354 (dbSNP Build ID: 151). Available from: http://www.ncbi.nlm.nih.gov/SNP/ """ def lookup_build_with_snp_pos(pos, s): try: return s.loc[s == pos].index[0] except: return None build = None rsids = ["rs3094315", "rs11928389", "rs2500347", "rs964481", "rs2341354"] df = pd.DataFrame( { 36: [742429, 50908372, 143649677, 27566744, 908436], 37: [752566, 50927009, 144938320, 27656823, 918573], 38: [817186, 50889578, 148946169, 27638706, 983193], }, index=rsids, ) for rsid in rsids: if rsid in snps.index: build = lookup_build_with_snp_pos(snps.loc[rsid].pos, df.loc[rsid]) if build is not None: break return build
python
def detect_build(snps): """ Detect build of SNPs. Use the coordinates of common SNPs to identify the build / assembly of a genotype file that is being loaded. Notes ----- rs3094315 : plus strand in 36, 37, and 38 rs11928389 : plus strand in 36, minus strand in 37 and 38 rs2500347 : plus strand in 36 and 37, minus strand in 38 rs964481 : plus strand in 36, 37, and 38 rs2341354 : plus strand in 36, 37, and 38 Parameters ---------- snps : pandas.DataFrame SNPs to add Returns ------- int detected build of SNPs, else None References ---------- ..[1] Yates et. al. (doi:10.1093/bioinformatics/btu613), http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613 ..[2] Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098 ..[3] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K. dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;29(1):308-11. ..[4] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center for Biotechnology Information, National Library of Medicine. dbSNP accession: rs3094315, rs11928389, rs2500347, rs964481, and rs2341354 (dbSNP Build ID: 151). Available from: http://www.ncbi.nlm.nih.gov/SNP/ """ def lookup_build_with_snp_pos(pos, s): try: return s.loc[s == pos].index[0] except: return None build = None rsids = ["rs3094315", "rs11928389", "rs2500347", "rs964481", "rs2341354"] df = pd.DataFrame( { 36: [742429, 50908372, 143649677, 27566744, 908436], 37: [752566, 50927009, 144938320, 27656823, 918573], 38: [817186, 50889578, 148946169, 27638706, 983193], }, index=rsids, ) for rsid in rsids: if rsid in snps.index: build = lookup_build_with_snp_pos(snps.loc[rsid].pos, df.loc[rsid]) if build is not None: break return build
[ "def", "detect_build", "(", "snps", ")", ":", "def", "lookup_build_with_snp_pos", "(", "pos", ",", "s", ")", ":", "try", ":", "return", "s", ".", "loc", "[", "s", "==", "pos", "]", ".", "index", "[", "0", "]", "except", ":", "return", "None", "build", "=", "None", "rsids", "=", "[", "\"rs3094315\"", ",", "\"rs11928389\"", ",", "\"rs2500347\"", ",", "\"rs964481\"", ",", "\"rs2341354\"", "]", "df", "=", "pd", ".", "DataFrame", "(", "{", "36", ":", "[", "742429", ",", "50908372", ",", "143649677", ",", "27566744", ",", "908436", "]", ",", "37", ":", "[", "752566", ",", "50927009", ",", "144938320", ",", "27656823", ",", "918573", "]", ",", "38", ":", "[", "817186", ",", "50889578", ",", "148946169", ",", "27638706", ",", "983193", "]", ",", "}", ",", "index", "=", "rsids", ",", ")", "for", "rsid", "in", "rsids", ":", "if", "rsid", "in", "snps", ".", "index", ":", "build", "=", "lookup_build_with_snp_pos", "(", "snps", ".", "loc", "[", "rsid", "]", ".", "pos", ",", "df", ".", "loc", "[", "rsid", "]", ")", "if", "build", "is", "not", "None", ":", "break", "return", "build" ]
Detect build of SNPs. Use the coordinates of common SNPs to identify the build / assembly of a genotype file that is being loaded. Notes ----- rs3094315 : plus strand in 36, 37, and 38 rs11928389 : plus strand in 36, minus strand in 37 and 38 rs2500347 : plus strand in 36 and 37, minus strand in 38 rs964481 : plus strand in 36, 37, and 38 rs2341354 : plus strand in 36, 37, and 38 Parameters ---------- snps : pandas.DataFrame SNPs to add Returns ------- int detected build of SNPs, else None References ---------- ..[1] Yates et. al. (doi:10.1093/bioinformatics/btu613), http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613 ..[2] Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098 ..[3] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K. dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;29(1):308-11. ..[4] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center for Biotechnology Information, National Library of Medicine. dbSNP accession: rs3094315, rs11928389, rs2500347, rs964481, and rs2341354 (dbSNP Build ID: 151). Available from: http://www.ncbi.nlm.nih.gov/SNP/
[ "Detect", "build", "of", "SNPs", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L491-L553
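detect_build compares the position of a few well-known SNPs against their coordinates in Builds 36, 37 and 38 and returns the first build that matches. A compact standalone version of that lookup, reusing the coordinate table from the record above:

import pandas as pd

rsids = ["rs3094315", "rs11928389", "rs2500347", "rs964481", "rs2341354"]
positions = pd.DataFrame(
    {
        36: [742429, 50908372, 143649677, 27566744, 908436],
        37: [752566, 50927009, 144938320, 27656823, 918573],
        38: [817186, 50889578, 148946169, 27638706, 983193],
    },
    index=rsids,
)

def detect_build_from_pos(rsid, pos):
    """Return the build whose known coordinate for rsid matches pos, else None."""
    row = positions.loc[rsid]
    matches = row[row == pos]
    return matches.index[0] if len(matches) else None

print(detect_build_from_pos("rs3094315", 752566))   # 37
print(detect_build_from_pos("rs3094315", 999999))   # None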
5,567
apriha/lineage
src/lineage/snps.py
get_chromosomes
def get_chromosomes(snps): """ Get the chromosomes of SNPs. Parameters ---------- snps : pandas.DataFrame Returns ------- list list of str chromosomes (e.g., ['1', '2', '3', 'MT'], empty list if no chromosomes """ if isinstance(snps, pd.DataFrame): return list(pd.unique(snps["chrom"])) else: return []
python
def get_chromosomes(snps): """ Get the chromosomes of SNPs. Parameters ---------- snps : pandas.DataFrame Returns ------- list list of str chromosomes (e.g., ['1', '2', '3', 'MT'], empty list if no chromosomes """ if isinstance(snps, pd.DataFrame): return list(pd.unique(snps["chrom"])) else: return []
[ "def", "get_chromosomes", "(", "snps", ")", ":", "if", "isinstance", "(", "snps", ",", "pd", ".", "DataFrame", ")", ":", "return", "list", "(", "pd", ".", "unique", "(", "snps", "[", "\"chrom\"", "]", ")", ")", "else", ":", "return", "[", "]" ]
Get the chromosomes of SNPs. Parameters ---------- snps : pandas.DataFrame Returns ------- list list of str chromosomes (e.g., ['1', '2', '3', 'MT'], empty list if no chromosomes
[ "Get", "the", "chromosomes", "of", "SNPs", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L597-L613
5,568
apriha/lineage
src/lineage/snps.py
get_chromosomes_summary
def get_chromosomes_summary(snps): """ Summary of the chromosomes of SNPs. Parameters ---------- snps : pandas.DataFrame Returns ------- str human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes """ if isinstance(snps, pd.DataFrame): chroms = list(pd.unique(snps["chrom"])) int_chroms = [int(chrom) for chrom in chroms if chrom.isdigit()] str_chroms = [chrom for chrom in chroms if not chrom.isdigit()] # https://codereview.stackexchange.com/a/5202 def as_range(iterable): l = list(iterable) if len(l) > 1: return "{0}-{1}".format(l[0], l[-1]) else: return "{0}".format(l[0]) # create str representations int_chroms = ", ".join( as_range(g) for _, g in groupby(int_chroms, key=lambda n, c=count(): n - next(c)) ) str_chroms = ", ".join(str_chroms) if int_chroms != "" and str_chroms != "": int_chroms += ", " return int_chroms + str_chroms else: return ""
python
def get_chromosomes_summary(snps): """ Summary of the chromosomes of SNPs. Parameters ---------- snps : pandas.DataFrame Returns ------- str human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes """ if isinstance(snps, pd.DataFrame): chroms = list(pd.unique(snps["chrom"])) int_chroms = [int(chrom) for chrom in chroms if chrom.isdigit()] str_chroms = [chrom for chrom in chroms if not chrom.isdigit()] # https://codereview.stackexchange.com/a/5202 def as_range(iterable): l = list(iterable) if len(l) > 1: return "{0}-{1}".format(l[0], l[-1]) else: return "{0}".format(l[0]) # create str representations int_chroms = ", ".join( as_range(g) for _, g in groupby(int_chroms, key=lambda n, c=count(): n - next(c)) ) str_chroms = ", ".join(str_chroms) if int_chroms != "" and str_chroms != "": int_chroms += ", " return int_chroms + str_chroms else: return ""
[ "def", "get_chromosomes_summary", "(", "snps", ")", ":", "if", "isinstance", "(", "snps", ",", "pd", ".", "DataFrame", ")", ":", "chroms", "=", "list", "(", "pd", ".", "unique", "(", "snps", "[", "\"chrom\"", "]", ")", ")", "int_chroms", "=", "[", "int", "(", "chrom", ")", "for", "chrom", "in", "chroms", "if", "chrom", ".", "isdigit", "(", ")", "]", "str_chroms", "=", "[", "chrom", "for", "chrom", "in", "chroms", "if", "not", "chrom", ".", "isdigit", "(", ")", "]", "# https://codereview.stackexchange.com/a/5202", "def", "as_range", "(", "iterable", ")", ":", "l", "=", "list", "(", "iterable", ")", "if", "len", "(", "l", ")", ">", "1", ":", "return", "\"{0}-{1}\"", ".", "format", "(", "l", "[", "0", "]", ",", "l", "[", "-", "1", "]", ")", "else", ":", "return", "\"{0}\"", ".", "format", "(", "l", "[", "0", "]", ")", "# create str representations", "int_chroms", "=", "\", \"", ".", "join", "(", "as_range", "(", "g", ")", "for", "_", ",", "g", "in", "groupby", "(", "int_chroms", ",", "key", "=", "lambda", "n", ",", "c", "=", "count", "(", ")", ":", "n", "-", "next", "(", "c", ")", ")", ")", "str_chroms", "=", "\", \"", ".", "join", "(", "str_chroms", ")", "if", "int_chroms", "!=", "\"\"", "and", "str_chroms", "!=", "\"\"", ":", "int_chroms", "+=", "\", \"", "return", "int_chroms", "+", "str_chroms", "else", ":", "return", "\"\"" ]
Summary of the chromosomes of SNPs. Parameters ---------- snps : pandas.DataFrame Returns ------- str human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes
[ "Summary", "of", "the", "chromosomes", "of", "SNPs", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L616-L655
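get_chromosomes_summary collapses runs of consecutive numeric chromosomes into range strings using itertools.groupby keyed on the difference between each value and a running counter. That grouping trick in isolation:

from itertools import count, groupby

def as_ranges(values):
    """Collapse sorted integers into range strings, e.g. [1, 2, 3, 5] -> '1-3, 5'."""
    def fmt(group):
        g = list(group)
        return "{0}-{1}".format(g[0], g[-1]) if len(g) > 1 else "{0}".format(g[0])
    # n - next(c) is constant within a run of consecutive integers,
    # so groupby splits the sequence at every gap.
    return ", ".join(fmt(g) for _, g in groupby(values, key=lambda n, c=count(): n - next(c)))

print(as_ranges([1, 2, 3, 5, 7, 8]))   # 1-3, 5, 7-8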
5,569
apriha/lineage
src/lineage/snps.py
determine_sex
def determine_sex( snps, y_snps_not_null_threshold=0.1, heterozygous_x_snps_threshold=0.01 ): """ Determine sex from SNPs using thresholds. Parameters ---------- snps : pandas.DataFrame y_snps_not_null_threshold : float percentage Y SNPs that are not null; above this threshold, Male is determined heterozygous_x_snps_threshold : float percentage heterozygous X SNPs; above this threshold, Female is determined Returns ------- str 'Male' or 'Female' if detected, else empty str """ if isinstance(snps, pd.DataFrame): y_snps = len(snps.loc[(snps["chrom"] == "Y")]) if y_snps > 0: y_snps_not_null = len( snps.loc[(snps["chrom"] == "Y") & (snps["genotype"].notnull())] ) if y_snps_not_null / y_snps > y_snps_not_null_threshold: return "Male" else: return "Female" x_snps = len(snps.loc[snps["chrom"] == "X"]) if x_snps == 0: return "" heterozygous_x_snps = len( snps.loc[ (snps["chrom"] == "X") & (snps["genotype"].notnull()) & (snps["genotype"].str[0] != snps["genotype"].str[1]) ] ) if heterozygous_x_snps / x_snps > heterozygous_x_snps_threshold: return "Female" else: return "Male" else: return ""
python
def determine_sex( snps, y_snps_not_null_threshold=0.1, heterozygous_x_snps_threshold=0.01 ): """ Determine sex from SNPs using thresholds. Parameters ---------- snps : pandas.DataFrame y_snps_not_null_threshold : float percentage Y SNPs that are not null; above this threshold, Male is determined heterozygous_x_snps_threshold : float percentage heterozygous X SNPs; above this threshold, Female is determined Returns ------- str 'Male' or 'Female' if detected, else empty str """ if isinstance(snps, pd.DataFrame): y_snps = len(snps.loc[(snps["chrom"] == "Y")]) if y_snps > 0: y_snps_not_null = len( snps.loc[(snps["chrom"] == "Y") & (snps["genotype"].notnull())] ) if y_snps_not_null / y_snps > y_snps_not_null_threshold: return "Male" else: return "Female" x_snps = len(snps.loc[snps["chrom"] == "X"]) if x_snps == 0: return "" heterozygous_x_snps = len( snps.loc[ (snps["chrom"] == "X") & (snps["genotype"].notnull()) & (snps["genotype"].str[0] != snps["genotype"].str[1]) ] ) if heterozygous_x_snps / x_snps > heterozygous_x_snps_threshold: return "Female" else: return "Male" else: return ""
[ "def", "determine_sex", "(", "snps", ",", "y_snps_not_null_threshold", "=", "0.1", ",", "heterozygous_x_snps_threshold", "=", "0.01", ")", ":", "if", "isinstance", "(", "snps", ",", "pd", ".", "DataFrame", ")", ":", "y_snps", "=", "len", "(", "snps", ".", "loc", "[", "(", "snps", "[", "\"chrom\"", "]", "==", "\"Y\"", ")", "]", ")", "if", "y_snps", ">", "0", ":", "y_snps_not_null", "=", "len", "(", "snps", ".", "loc", "[", "(", "snps", "[", "\"chrom\"", "]", "==", "\"Y\"", ")", "&", "(", "snps", "[", "\"genotype\"", "]", ".", "notnull", "(", ")", ")", "]", ")", "if", "y_snps_not_null", "/", "y_snps", ">", "y_snps_not_null_threshold", ":", "return", "\"Male\"", "else", ":", "return", "\"Female\"", "x_snps", "=", "len", "(", "snps", ".", "loc", "[", "snps", "[", "\"chrom\"", "]", "==", "\"X\"", "]", ")", "if", "x_snps", "==", "0", ":", "return", "\"\"", "heterozygous_x_snps", "=", "len", "(", "snps", ".", "loc", "[", "(", "snps", "[", "\"chrom\"", "]", "==", "\"X\"", ")", "&", "(", "snps", "[", "\"genotype\"", "]", ".", "notnull", "(", ")", ")", "&", "(", "snps", "[", "\"genotype\"", "]", ".", "str", "[", "0", "]", "!=", "snps", "[", "\"genotype\"", "]", ".", "str", "[", "1", "]", ")", "]", ")", "if", "heterozygous_x_snps", "/", "x_snps", ">", "heterozygous_x_snps_threshold", ":", "return", "\"Female\"", "else", ":", "return", "\"Male\"", "else", ":", "return", "\"\"" ]
Determine sex from SNPs using thresholds. Parameters ---------- snps : pandas.DataFrame y_snps_not_null_threshold : float percentage Y SNPs that are not null; above this threshold, Male is determined heterozygous_x_snps_threshold : float percentage heterozygous X SNPs; above this threshold, Female is determined Returns ------- str 'Male' or 'Female' if detected, else empty str
[ "Determine", "sex", "from", "SNPs", "using", "thresholds", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L658-L708
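determine_sex reduces to two ratios: the fraction of Y-chromosome calls that are non-null, and the fraction of X-chromosome calls that are heterozygous, each compared against a threshold. A toy worked example in the same chrom/genotype column layout (the genotypes are invented):

import pandas as pd

# Toy SNPs in the same layout as the lineage dataframes: chrom + genotype
snps = pd.DataFrame(
    {
        "chrom": ["X", "X", "X", "X", "Y", "Y", "Y"],
        "genotype": ["AA", "CC", "GG", "TT", "A", None, "G"],
    }
)

y = snps[snps.chrom == "Y"]
y_not_null_ratio = y.genotype.notnull().sum() / len(y)   # 2/3, above the 0.1 threshold

x = snps[snps.chrom == "X"]
het = x.genotype.notnull() & (x.genotype.str[0] != x.genotype.str[1])
het_x_ratio = het.sum() / len(x)                         # 0/4, all homozygous

print(y_not_null_ratio, het_x_ratio)                     # 0.666... 0.0
print("Male" if y_not_null_ratio > 0.1 else "Female")    # Male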
5,570
apriha/lineage
src/lineage/snps.py
sort_snps
def sort_snps(snps): """ Sort SNPs based on ordered chromosome list and position. """ sorted_list = sorted(snps["chrom"].unique(), key=_natural_sort_key) # move PAR and MT to the end of the dataframe if "PAR" in sorted_list: sorted_list.remove("PAR") sorted_list.append("PAR") if "MT" in sorted_list: sorted_list.remove("MT") sorted_list.append("MT") # convert chrom column to category for sorting # https://stackoverflow.com/a/26707444 snps["chrom"] = snps["chrom"].astype( CategoricalDtype(categories=sorted_list, ordered=True) ) # sort based on ordered chromosome list and position snps = snps.sort_values(["chrom", "pos"]) # convert chromosome back to object snps["chrom"] = snps["chrom"].astype(object) return snps
python
def sort_snps(snps): """ Sort SNPs based on ordered chromosome list and position. """ sorted_list = sorted(snps["chrom"].unique(), key=_natural_sort_key) # move PAR and MT to the end of the dataframe if "PAR" in sorted_list: sorted_list.remove("PAR") sorted_list.append("PAR") if "MT" in sorted_list: sorted_list.remove("MT") sorted_list.append("MT") # convert chrom column to category for sorting # https://stackoverflow.com/a/26707444 snps["chrom"] = snps["chrom"].astype( CategoricalDtype(categories=sorted_list, ordered=True) ) # sort based on ordered chromosome list and position snps = snps.sort_values(["chrom", "pos"]) # convert chromosome back to object snps["chrom"] = snps["chrom"].astype(object) return snps
[ "def", "sort_snps", "(", "snps", ")", ":", "sorted_list", "=", "sorted", "(", "snps", "[", "\"chrom\"", "]", ".", "unique", "(", ")", ",", "key", "=", "_natural_sort_key", ")", "# move PAR and MT to the end of the dataframe", "if", "\"PAR\"", "in", "sorted_list", ":", "sorted_list", ".", "remove", "(", "\"PAR\"", ")", "sorted_list", ".", "append", "(", "\"PAR\"", ")", "if", "\"MT\"", "in", "sorted_list", ":", "sorted_list", ".", "remove", "(", "\"MT\"", ")", "sorted_list", ".", "append", "(", "\"MT\"", ")", "# convert chrom column to category for sorting", "# https://stackoverflow.com/a/26707444", "snps", "[", "\"chrom\"", "]", "=", "snps", "[", "\"chrom\"", "]", ".", "astype", "(", "CategoricalDtype", "(", "categories", "=", "sorted_list", ",", "ordered", "=", "True", ")", ")", "# sort based on ordered chromosome list and position", "snps", "=", "snps", ".", "sort_values", "(", "[", "\"chrom\"", ",", "\"pos\"", "]", ")", "# convert chromosome back to object", "snps", "[", "\"chrom\"", "]", "=", "snps", "[", "\"chrom\"", "]", ".", "astype", "(", "object", ")", "return", "snps" ]
Sort SNPs based on ordered chromosome list and position.
[ "Sort", "SNPs", "based", "on", "ordered", "chromosome", "list", "and", "position", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L711-L737
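sort_snps gets chromosomes into natural order by temporarily converting the chrom column to an ordered categorical, sorting, and converting back. The core trick on a toy frame; the explicit category order below stands in for the natural-sort key and the PAR/MT handling in the real function:

import pandas as pd
from pandas.api.types import CategoricalDtype

df = pd.DataFrame({"chrom": ["10", "2", "X", "1", "MT"], "pos": [5, 4, 3, 2, 1]})

# Order chromosomes naturally, keeping MT at the end (as sort_snps does)
order = ["1", "2", "10", "X", "MT"]
df["chrom"] = df["chrom"].astype(CategoricalDtype(categories=order, ordered=True))
df = df.sort_values(["chrom", "pos"])
df["chrom"] = df["chrom"].astype(object)   # back to plain strings

print(df.chrom.tolist())   # ['1', '2', '10', 'X', 'MT']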
5,571
apriha/lineage
src/lineage/snps.py
SNPs.get_summary
def get_summary(self): """ Get summary of ``SNPs``. Returns ------- dict summary info, else None if ``SNPs`` is not valid """ if not self.is_valid(): return None else: return { "source": self.source, "assembly": self.assembly, "build": self.build, "build_detected": self.build_detected, "snp_count": self.snp_count, "chromosomes": self.chromosomes_summary, "sex": self.sex, }
python
def get_summary(self): """ Get summary of ``SNPs``. Returns ------- dict summary info, else None if ``SNPs`` is not valid """ if not self.is_valid(): return None else: return { "source": self.source, "assembly": self.assembly, "build": self.build, "build_detected": self.build_detected, "snp_count": self.snp_count, "chromosomes": self.chromosomes_summary, "sex": self.sex, }
[ "def", "get_summary", "(", "self", ")", ":", "if", "not", "self", ".", "is_valid", "(", ")", ":", "return", "None", "else", ":", "return", "{", "\"source\"", ":", "self", ".", "source", ",", "\"assembly\"", ":", "self", ".", "assembly", ",", "\"build\"", ":", "self", ".", "build", ",", "\"build_detected\"", ":", "self", ".", "build_detected", ",", "\"snp_count\"", ":", "self", ".", "snp_count", ",", "\"chromosomes\"", ":", "self", ".", "chromosomes_summary", ",", "\"sex\"", ":", "self", ".", "sex", ",", "}" ]
Get summary of ``SNPs``. Returns ------- dict summary info, else None if ``SNPs`` is not valid
[ "Get", "summary", "of", "SNPs", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L113-L132
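A hedged usage sketch for SNPs.get_summary; these records do not show the SNPs constructor, so passing a raw-data file path to it is an assumption, and "genotype.txt" is a placeholder.
from lineage.snps import SNPs  # assumed import path

s = SNPs("genotype.txt")  # placeholder path; constructor signature is an assumption
summary = s.get_summary()
if summary:  # get_summary returns None when the SNPs object is not valid
    print(summary["source"], summary["build"], summary["snp_count"])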
5,572
apriha/lineage
src/lineage/snps.py
SNPs._read_23andme
def _read_23andme(file): """ Read and parse 23andMe file. https://www.23andme.com Parameters ---------- file : str path to file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source """ df = pd.read_csv( file, comment="#", sep="\t", na_values="--", names=["rsid", "chrom", "pos", "genotype"], index_col=0, dtype={"chrom": object}, ) return sort_snps(df), "23andMe"
python
def _read_23andme(file): """ Read and parse 23andMe file. https://www.23andme.com Parameters ---------- file : str path to file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source """ df = pd.read_csv( file, comment="#", sep="\t", na_values="--", names=["rsid", "chrom", "pos", "genotype"], index_col=0, dtype={"chrom": object}, ) return sort_snps(df), "23andMe"
[ "def", "_read_23andme", "(", "file", ")", ":", "df", "=", "pd", ".", "read_csv", "(", "file", ",", "comment", "=", "\"#\"", ",", "sep", "=", "\"\\t\"", ",", "na_values", "=", "\"--\"", ",", "names", "=", "[", "\"rsid\"", ",", "\"chrom\"", ",", "\"pos\"", ",", "\"genotype\"", "]", ",", "index_col", "=", "0", ",", "dtype", "=", "{", "\"chrom\"", ":", "object", "}", ",", ")", "return", "sort_snps", "(", "df", ")", ",", "\"23andMe\"" ]
Read and parse 23andMe file. https://www.23andme.com Parameters ---------- file : str path to file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source
[ "Read", "and", "parse", "23andMe", "file", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L204-L231
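To make the expected 23andMe layout concrete, a standalone sketch that writes a toy tab-separated file and parses it with the same pandas options used by _read_23andme above; all values are made up.
import pandas as pd

toy = (
    "# This data file is a made-up example of the 23andMe layout\n"
    "rs1\t1\t101\tAA\n"
    "rs2\t1\t202\tCT\n"
    "rs3\tX\t303\t--\n"
)
with open("toy_23andme.txt", "w") as f:
    f.write(toy)

df = pd.read_csv(
    "toy_23andme.txt",
    comment="#",          # header comments are skipped
    sep="\t",
    na_values="--",       # unreported genotypes become NaN
    names=["rsid", "chrom", "pos", "genotype"],
    index_col=0,
    dtype={"chrom": object},
)
print(df)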
5,573
apriha/lineage
src/lineage/snps.py
SNPs._read_lineage_csv
def _read_lineage_csv(file, comments): """ Read and parse CSV file generated by lineage. Parameters ---------- file : str path to file comments : str comments at beginning of file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source(s) """ source = "" for comment in comments.split("\n"): if "Source(s):" in comment: source = comment.split("Source(s):")[1].strip() break df = pd.read_csv( file, comment="#", header=0, na_values="--", names=["rsid", "chrom", "pos", "genotype"], index_col=0, dtype={"chrom": object, "pos": np.int64}, ) return sort_snps(df), source
python
def _read_lineage_csv(file, comments): """ Read and parse CSV file generated by lineage. Parameters ---------- file : str path to file comments : str comments at beginning of file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source(s) """ source = "" for comment in comments.split("\n"): if "Source(s):" in comment: source = comment.split("Source(s):")[1].strip() break df = pd.read_csv( file, comment="#", header=0, na_values="--", names=["rsid", "chrom", "pos", "genotype"], index_col=0, dtype={"chrom": object, "pos": np.int64}, ) return sort_snps(df), source
[ "def", "_read_lineage_csv", "(", "file", ",", "comments", ")", ":", "source", "=", "\"\"", "for", "comment", "in", "comments", ".", "split", "(", "\"\\n\"", ")", ":", "if", "\"Source(s):\"", "in", "comment", ":", "source", "=", "comment", ".", "split", "(", "\"Source(s):\"", ")", "[", "1", "]", ".", "strip", "(", ")", "break", "df", "=", "pd", ".", "read_csv", "(", "file", ",", "comment", "=", "\"#\"", ",", "header", "=", "0", ",", "na_values", "=", "\"--\"", ",", "names", "=", "[", "\"rsid\"", ",", "\"chrom\"", ",", "\"pos\"", ",", "\"genotype\"", "]", ",", "index_col", "=", "0", ",", "dtype", "=", "{", "\"chrom\"", ":", "object", ",", "\"pos\"", ":", "np", ".", "int64", "}", ",", ")", "return", "sort_snps", "(", "df", ")", ",", "source" ]
Read and parse CSV file generated by lineage. Parameters ---------- file : str path to file comments : str comments at beginning of file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source(s)
[ "Read", "and", "parse", "CSV", "file", "generated", "by", "lineage", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L354-L387
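A standalone sketch of the header comments and CSV layout this reader expects, using toy values; it also repeats the "Source(s):" scan so it is clear where the returned source string comes from.
import numpy as np
import pandas as pd

comments = (
    "# Generated by lineage, https://github.com/apriha/lineage\n"
    "# Source(s): 23andMe\n"
)
with open("toy_lineage.csv", "w") as f:
    f.write(comments)
    f.write("rsid,chrom,pos,genotype\n")
    f.write("rs1,1,101,AA\n")
    f.write("rs2,1,102,--\n")

# same scan as _read_lineage_csv above
source = ""
for comment in comments.split("\n"):
    if "Source(s):" in comment:
        source = comment.split("Source(s):")[1].strip()
        break

df = pd.read_csv(
    "toy_lineage.csv",
    comment="#",
    header=0,
    na_values="--",
    names=["rsid", "chrom", "pos", "genotype"],
    index_col=0,
    dtype={"chrom": object, "pos": np.int64},
)
print(source, len(df))  # "23andMe" and 2 rows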
5,574
apriha/lineage
src/lineage/snps.py
SNPs._read_generic_csv
def _read_generic_csv(file): """ Read and parse generic CSV file. Notes ----- Assumes columns are 'rsid', 'chrom' / 'chromosome', 'pos' / 'position', and 'genotype'; values are comma separated; unreported genotypes are indicated by '--'; and one header row precedes data. For example: rsid,chromosome,position,genotype rs1,1,1,AA rs2,1,2,CC rs3,1,3,-- Parameters ---------- file : str path to file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source """ df = pd.read_csv( file, skiprows=1, na_values="--", names=["rsid", "chrom", "pos", "genotype"], index_col=0, dtype={"chrom": object, "pos": np.int64}, ) return sort_snps(df), "generic"
python
def _read_generic_csv(file): """ Read and parse generic CSV file. Notes ----- Assumes columns are 'rsid', 'chrom' / 'chromosome', 'pos' / 'position', and 'genotype'; values are comma separated; unreported genotypes are indicated by '--'; and one header row precedes data. For example: rsid,chromosome,position,genotype rs1,1,1,AA rs2,1,2,CC rs3,1,3,-- Parameters ---------- file : str path to file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source """ df = pd.read_csv( file, skiprows=1, na_values="--", names=["rsid", "chrom", "pos", "genotype"], index_col=0, dtype={"chrom": object, "pos": np.int64}, ) return sort_snps(df), "generic"
[ "def", "_read_generic_csv", "(", "file", ")", ":", "df", "=", "pd", ".", "read_csv", "(", "file", ",", "skiprows", "=", "1", ",", "na_values", "=", "\"--\"", ",", "names", "=", "[", "\"rsid\"", ",", "\"chrom\"", ",", "\"pos\"", ",", "\"genotype\"", "]", ",", "index_col", "=", "0", ",", "dtype", "=", "{", "\"chrom\"", ":", "object", ",", "\"pos\"", ":", "np", ".", "int64", "}", ",", ")", "return", "sort_snps", "(", "df", ")", ",", "\"generic\"" ]
Read and parse generic CSV file. Notes ----- Assumes columns are 'rsid', 'chrom' / 'chromosome', 'pos' / 'position', and 'genotype'; values are comma separated; unreported genotypes are indicated by '--'; and one header row precedes data. For example: rsid,chromosome,position,genotype rs1,1,1,AA rs2,1,2,CC rs3,1,3,-- Parameters ---------- file : str path to file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source
[ "Read", "and", "parse", "generic", "CSV", "file", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L390-L425
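A standalone sketch of the generic CSV layout documented above, parsed with the same pandas options as _read_generic_csv; the rows are the toy values from the docstring.
import numpy as np
import pandas as pd

with open("toy_generic.csv", "w") as f:
    f.write("rsid,chromosome,position,genotype\n")
    f.write("rs1,1,1,AA\n")
    f.write("rs2,1,2,CC\n")
    f.write("rs3,1,3,--\n")

df = pd.read_csv(
    "toy_generic.csv",
    skiprows=1,       # the single header row is skipped
    na_values="--",   # unreported genotypes become NaN
    names=["rsid", "chrom", "pos", "genotype"],
    index_col=0,
    dtype={"chrom": object, "pos": np.int64},
)
print(df)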
5,575
apriha/lineage
src/lineage/snps.py
SNPs._assign_par_snps
def _assign_par_snps(self): """ Assign PAR SNPs to the X or Y chromosome using SNP position. References ----- ..[1] National Center for Biotechnology Information, Variation Services, RefSNP, https://api.ncbi.nlm.nih.gov/variation/v0/ ..[2] Yates et. al. (doi:10.1093/bioinformatics/btu613), http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613 ..[3] Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098 ..[4] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K. dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1; 29(1):308-11. ..[5] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center for Biotechnology Information, National Library of Medicine. dbSNP accession: rs28736870, rs113313554, and rs758419898 (dbSNP Build ID: 151). Available from: http://www.ncbi.nlm.nih.gov/SNP/ """ rest_client = EnsemblRestClient(server="https://api.ncbi.nlm.nih.gov") for rsid in self.snps.loc[self.snps["chrom"] == "PAR"].index.values: if "rs" in rsid: try: id = rsid.split("rs")[1] response = rest_client.perform_rest_action( "/variation/v0/beta/refsnp/" + id ) if response is not None: for item in response["primary_snapshot_data"][ "placements_with_allele" ]: if "NC_000023" in item["seq_id"]: assigned = self._assign_snp(rsid, item["alleles"], "X") elif "NC_000024" in item["seq_id"]: assigned = self._assign_snp(rsid, item["alleles"], "Y") else: assigned = False if assigned: if not self.build_detected: self.build = self._extract_build(item) self.build_detected = True continue except Exception as err: print(err)
python
def _assign_par_snps(self): """ Assign PAR SNPs to the X or Y chromosome using SNP position. References ----- ..[1] National Center for Biotechnology Information, Variation Services, RefSNP, https://api.ncbi.nlm.nih.gov/variation/v0/ ..[2] Yates et. al. (doi:10.1093/bioinformatics/btu613), http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613 ..[3] Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098 ..[4] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K. dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1; 29(1):308-11. ..[5] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center for Biotechnology Information, National Library of Medicine. dbSNP accession: rs28736870, rs113313554, and rs758419898 (dbSNP Build ID: 151). Available from: http://www.ncbi.nlm.nih.gov/SNP/ """ rest_client = EnsemblRestClient(server="https://api.ncbi.nlm.nih.gov") for rsid in self.snps.loc[self.snps["chrom"] == "PAR"].index.values: if "rs" in rsid: try: id = rsid.split("rs")[1] response = rest_client.perform_rest_action( "/variation/v0/beta/refsnp/" + id ) if response is not None: for item in response["primary_snapshot_data"][ "placements_with_allele" ]: if "NC_000023" in item["seq_id"]: assigned = self._assign_snp(rsid, item["alleles"], "X") elif "NC_000024" in item["seq_id"]: assigned = self._assign_snp(rsid, item["alleles"], "Y") else: assigned = False if assigned: if not self.build_detected: self.build = self._extract_build(item) self.build_detected = True continue except Exception as err: print(err)
[ "def", "_assign_par_snps", "(", "self", ")", ":", "rest_client", "=", "EnsemblRestClient", "(", "server", "=", "\"https://api.ncbi.nlm.nih.gov\"", ")", "for", "rsid", "in", "self", ".", "snps", ".", "loc", "[", "self", ".", "snps", "[", "\"chrom\"", "]", "==", "\"PAR\"", "]", ".", "index", ".", "values", ":", "if", "\"rs\"", "in", "rsid", ":", "try", ":", "id", "=", "rsid", ".", "split", "(", "\"rs\"", ")", "[", "1", "]", "response", "=", "rest_client", ".", "perform_rest_action", "(", "\"/variation/v0/beta/refsnp/\"", "+", "id", ")", "if", "response", "is", "not", "None", ":", "for", "item", "in", "response", "[", "\"primary_snapshot_data\"", "]", "[", "\"placements_with_allele\"", "]", ":", "if", "\"NC_000023\"", "in", "item", "[", "\"seq_id\"", "]", ":", "assigned", "=", "self", ".", "_assign_snp", "(", "rsid", ",", "item", "[", "\"alleles\"", "]", ",", "\"X\"", ")", "elif", "\"NC_000024\"", "in", "item", "[", "\"seq_id\"", "]", ":", "assigned", "=", "self", ".", "_assign_snp", "(", "rsid", ",", "item", "[", "\"alleles\"", "]", ",", "\"Y\"", ")", "else", ":", "assigned", "=", "False", "if", "assigned", ":", "if", "not", "self", ".", "build_detected", ":", "self", ".", "build", "=", "self", ".", "_extract_build", "(", "item", ")", "self", ".", "build_detected", "=", "True", "continue", "except", "Exception", "as", "err", ":", "print", "(", "err", ")" ]
Assign PAR SNPs to the X or Y chromosome using SNP position. References ----- ..[1] National Center for Biotechnology Information, Variation Services, RefSNP, https://api.ncbi.nlm.nih.gov/variation/v0/ ..[2] Yates et. al. (doi:10.1093/bioinformatics/btu613), http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613 ..[3] Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098 ..[4] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K. dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1; 29(1):308-11. ..[5] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center for Biotechnology Information, National Library of Medicine. dbSNP accession: rs28736870, rs113313554, and rs758419898 (dbSNP Build ID: 151). Available from: http://www.ncbi.nlm.nih.gov/SNP/
[ "Assign", "PAR", "SNPs", "to", "the", "X", "or", "Y", "chromosome", "using", "SNP", "position", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L427-L472
5,576
apriha/lineage
src/lineage/visualization.py
plot_chromosomes
def plot_chromosomes(one_chrom_match, two_chrom_match, cytobands, path, title, build): """ Plots chromosomes with designated markers. Parameters ---------- one_chrom_match : list of dicts segments to highlight on the chromosomes representing one shared chromosome two_chrom_match : list of dicts segments to highlight on the chromosomes representing two shared chromosomes cytobands : pandas.DataFrame cytobands table loaded with Resources path : str path to destination `.png` file title : str title for plot build : {37} human genome build """ # Height of each chromosome chrom_height = 1.25 # Spacing between consecutive chromosomes chrom_spacing = 1 # Decide which chromosomes to use chromosome_list = ["chr%s" % i for i in range(1, 23)] chromosome_list.append("chrY") chromosome_list.append("chrX") # Keep track of the y positions for chromosomes, and the center of each chromosome # (which is where we'll put the ytick labels) ybase = 0 chrom_ybase = {} chrom_centers = {} # Iterate in reverse so that items in the beginning of `chromosome_list` will # appear at the top of the plot for chrom in chromosome_list[::-1]: chrom_ybase[chrom] = ybase chrom_centers[chrom] = ybase + chrom_height / 2.0 ybase += chrom_height + chrom_spacing # Colors for different chromosome stains color_lookup = { "gneg": (202 / 255, 202 / 255, 202 / 255), # background "one_chrom": (0 / 255, 176 / 255, 240 / 255), "two_chrom": (66 / 255, 69 / 255, 121 / 255), "centromere": (1, 1, 1, 0.6), } df = _patch_chromosomal_features(cytobands, one_chrom_match, two_chrom_match) # Add a new column for colors df["colors"] = df["gie_stain"].apply(lambda x: color_lookup[x]) # Width, height (in inches) figsize = (6.5, 9) fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) # Now all we have to do is call our function for the chromosome data... for collection in _chromosome_collections(df, chrom_ybase, chrom_height): ax.add_collection(collection) # Axes tweaking ax.set_yticks([chrom_centers[i] for i in chromosome_list]) ax.set_yticklabels(chromosome_list) ax.margins(0.01) ax.axis("tight") handles = [] # setup legend if len(one_chrom_match) > 0: one_chrom_patch = patches.Patch( color=color_lookup["one_chrom"], label="One chromosome shared" ) handles.append(one_chrom_patch) if len(two_chrom_match) > 0: two_chrom_patch = patches.Patch( color=color_lookup["two_chrom"], label="Two chromosomes shared" ) handles.append(two_chrom_patch) no_match_patch = patches.Patch(color=color_lookup["gneg"], label="No shared DNA") handles.append(no_match_patch) centromere_patch = patches.Patch( color=(234 / 255, 234 / 255, 234 / 255), label="Centromere" ) handles.append(centromere_patch) plt.legend(handles=handles, loc="lower right", bbox_to_anchor=(0.95, 0.05)) ax.set_title(title, fontsize=14, fontweight="bold") plt.xlabel("Build " + str(build) + " Chromosome Position", fontsize=10) print("Saving " + os.path.relpath(path)) plt.tight_layout() plt.savefig(path)
python
def plot_chromosomes(one_chrom_match, two_chrom_match, cytobands, path, title, build): """ Plots chromosomes with designated markers. Parameters ---------- one_chrom_match : list of dicts segments to highlight on the chromosomes representing one shared chromosome two_chrom_match : list of dicts segments to highlight on the chromosomes representing two shared chromosomes cytobands : pandas.DataFrame cytobands table loaded with Resources path : str path to destination `.png` file title : str title for plot build : {37} human genome build """ # Height of each chromosome chrom_height = 1.25 # Spacing between consecutive chromosomes chrom_spacing = 1 # Decide which chromosomes to use chromosome_list = ["chr%s" % i for i in range(1, 23)] chromosome_list.append("chrY") chromosome_list.append("chrX") # Keep track of the y positions for chromosomes, and the center of each chromosome # (which is where we'll put the ytick labels) ybase = 0 chrom_ybase = {} chrom_centers = {} # Iterate in reverse so that items in the beginning of `chromosome_list` will # appear at the top of the plot for chrom in chromosome_list[::-1]: chrom_ybase[chrom] = ybase chrom_centers[chrom] = ybase + chrom_height / 2.0 ybase += chrom_height + chrom_spacing # Colors for different chromosome stains color_lookup = { "gneg": (202 / 255, 202 / 255, 202 / 255), # background "one_chrom": (0 / 255, 176 / 255, 240 / 255), "two_chrom": (66 / 255, 69 / 255, 121 / 255), "centromere": (1, 1, 1, 0.6), } df = _patch_chromosomal_features(cytobands, one_chrom_match, two_chrom_match) # Add a new column for colors df["colors"] = df["gie_stain"].apply(lambda x: color_lookup[x]) # Width, height (in inches) figsize = (6.5, 9) fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) # Now all we have to do is call our function for the chromosome data... for collection in _chromosome_collections(df, chrom_ybase, chrom_height): ax.add_collection(collection) # Axes tweaking ax.set_yticks([chrom_centers[i] for i in chromosome_list]) ax.set_yticklabels(chromosome_list) ax.margins(0.01) ax.axis("tight") handles = [] # setup legend if len(one_chrom_match) > 0: one_chrom_patch = patches.Patch( color=color_lookup["one_chrom"], label="One chromosome shared" ) handles.append(one_chrom_patch) if len(two_chrom_match) > 0: two_chrom_patch = patches.Patch( color=color_lookup["two_chrom"], label="Two chromosomes shared" ) handles.append(two_chrom_patch) no_match_patch = patches.Patch(color=color_lookup["gneg"], label="No shared DNA") handles.append(no_match_patch) centromere_patch = patches.Patch( color=(234 / 255, 234 / 255, 234 / 255), label="Centromere" ) handles.append(centromere_patch) plt.legend(handles=handles, loc="lower right", bbox_to_anchor=(0.95, 0.05)) ax.set_title(title, fontsize=14, fontweight="bold") plt.xlabel("Build " + str(build) + " Chromosome Position", fontsize=10) print("Saving " + os.path.relpath(path)) plt.tight_layout() plt.savefig(path)
[ "def", "plot_chromosomes", "(", "one_chrom_match", ",", "two_chrom_match", ",", "cytobands", ",", "path", ",", "title", ",", "build", ")", ":", "# Height of each chromosome", "chrom_height", "=", "1.25", "# Spacing between consecutive chromosomes", "chrom_spacing", "=", "1", "# Decide which chromosomes to use", "chromosome_list", "=", "[", "\"chr%s\"", "%", "i", "for", "i", "in", "range", "(", "1", ",", "23", ")", "]", "chromosome_list", ".", "append", "(", "\"chrY\"", ")", "chromosome_list", ".", "append", "(", "\"chrX\"", ")", "# Keep track of the y positions for chromosomes, and the center of each chromosome", "# (which is where we'll put the ytick labels)", "ybase", "=", "0", "chrom_ybase", "=", "{", "}", "chrom_centers", "=", "{", "}", "# Iterate in reverse so that items in the beginning of `chromosome_list` will", "# appear at the top of the plot", "for", "chrom", "in", "chromosome_list", "[", ":", ":", "-", "1", "]", ":", "chrom_ybase", "[", "chrom", "]", "=", "ybase", "chrom_centers", "[", "chrom", "]", "=", "ybase", "+", "chrom_height", "/", "2.0", "ybase", "+=", "chrom_height", "+", "chrom_spacing", "# Colors for different chromosome stains", "color_lookup", "=", "{", "\"gneg\"", ":", "(", "202", "/", "255", ",", "202", "/", "255", ",", "202", "/", "255", ")", ",", "# background", "\"one_chrom\"", ":", "(", "0", "/", "255", ",", "176", "/", "255", ",", "240", "/", "255", ")", ",", "\"two_chrom\"", ":", "(", "66", "/", "255", ",", "69", "/", "255", ",", "121", "/", "255", ")", ",", "\"centromere\"", ":", "(", "1", ",", "1", ",", "1", ",", "0.6", ")", ",", "}", "df", "=", "_patch_chromosomal_features", "(", "cytobands", ",", "one_chrom_match", ",", "two_chrom_match", ")", "# Add a new column for colors", "df", "[", "\"colors\"", "]", "=", "df", "[", "\"gie_stain\"", "]", ".", "apply", "(", "lambda", "x", ":", "color_lookup", "[", "x", "]", ")", "# Width, height (in inches)", "figsize", "=", "(", "6.5", ",", "9", ")", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ")", "# Now all we have to do is call our function for the chromosome data...", "for", "collection", "in", "_chromosome_collections", "(", "df", ",", "chrom_ybase", ",", "chrom_height", ")", ":", "ax", ".", "add_collection", "(", "collection", ")", "# Axes tweaking", "ax", ".", "set_yticks", "(", "[", "chrom_centers", "[", "i", "]", "for", "i", "in", "chromosome_list", "]", ")", "ax", ".", "set_yticklabels", "(", "chromosome_list", ")", "ax", ".", "margins", "(", "0.01", ")", "ax", ".", "axis", "(", "\"tight\"", ")", "handles", "=", "[", "]", "# setup legend", "if", "len", "(", "one_chrom_match", ")", ">", "0", ":", "one_chrom_patch", "=", "patches", ".", "Patch", "(", "color", "=", "color_lookup", "[", "\"one_chrom\"", "]", ",", "label", "=", "\"One chromosome shared\"", ")", "handles", ".", "append", "(", "one_chrom_patch", ")", "if", "len", "(", "two_chrom_match", ")", ">", "0", ":", "two_chrom_patch", "=", "patches", ".", "Patch", "(", "color", "=", "color_lookup", "[", "\"two_chrom\"", "]", ",", "label", "=", "\"Two chromosomes shared\"", ")", "handles", ".", "append", "(", "two_chrom_patch", ")", "no_match_patch", "=", "patches", ".", "Patch", "(", "color", "=", "color_lookup", "[", "\"gneg\"", "]", ",", "label", "=", "\"No shared DNA\"", ")", "handles", ".", "append", "(", "no_match_patch", ")", "centromere_patch", "=", "patches", ".", "Patch", "(", "color", "=", "(", "234", "/", "255", ",", "234", "/", "255", ",", "234", "/", "255", ")", 
",", "label", "=", "\"Centromere\"", ")", "handles", ".", "append", "(", "centromere_patch", ")", "plt", ".", "legend", "(", "handles", "=", "handles", ",", "loc", "=", "\"lower right\"", ",", "bbox_to_anchor", "=", "(", "0.95", ",", "0.05", ")", ")", "ax", ".", "set_title", "(", "title", ",", "fontsize", "=", "14", ",", "fontweight", "=", "\"bold\"", ")", "plt", ".", "xlabel", "(", "\"Build \"", "+", "str", "(", "build", ")", "+", "\" Chromosome Position\"", ",", "fontsize", "=", "10", ")", "print", "(", "\"Saving \"", "+", "os", ".", "path", ".", "relpath", "(", "path", ")", ")", "plt", ".", "tight_layout", "(", ")", "plt", ".", "savefig", "(", "path", ")" ]
Plots chromosomes with designated markers. Parameters ---------- one_chrom_match : list of dicts segments to highlight on the chromosomes representing one shared chromosome two_chrom_match : list of dicts segments to highlight on the chromosomes representing two shared chromosomes cytobands : pandas.DataFrame cytobands table loaded with Resources path : str path to destination `.png` file title : str title for plot build : {37} human genome build
[ "Plots", "chromosomes", "with", "designated", "markers", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/visualization.py#L69-L169
5,577
apriha/lineage
src/lineage/__init__.py
create_dir
def create_dir(path): """ Create directory specified by `path` if it doesn't already exist. Parameters ---------- path : str path to directory Returns ------- bool True if `path` exists """ # https://stackoverflow.com/a/5032238 try: os.makedirs(path, exist_ok=True) except Exception as err: print(err) return False if os.path.exists(path): return True else: return False
python
def create_dir(path): """ Create directory specified by `path` if it doesn't already exist. Parameters ---------- path : str path to directory Returns ------- bool True if `path` exists """ # https://stackoverflow.com/a/5032238 try: os.makedirs(path, exist_ok=True) except Exception as err: print(err) return False if os.path.exists(path): return True else: return False
[ "def", "create_dir", "(", "path", ")", ":", "# https://stackoverflow.com/a/5032238", "try", ":", "os", ".", "makedirs", "(", "path", ",", "exist_ok", "=", "True", ")", "except", "Exception", "as", "err", ":", "print", "(", "err", ")", "return", "False", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "True", "else", ":", "return", "False" ]
Create directory specified by `path` if it doesn't already exist. Parameters ---------- path : str path to directory Returns ------- bool True if `path` exists
[ "Create", "directory", "specified", "by", "path", "if", "it", "doesn", "t", "already", "exist", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/__init__.py#L859-L882
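Usage sketch for create_dir, assuming it is importable from the package root as src/lineage/__init__.py suggests; "output/plots" is a placeholder directory.
from lineage import create_dir  # assumed import path (src/lineage/__init__.py above)

if create_dir("output/plots"):  # True if the directory exists afterwards
    print("output/plots is ready")
else:
    print("could not create output/plots")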
5,578
apriha/lineage
src/lineage/__init__.py
save_df_as_csv
def save_df_as_csv(df, path, filename, comment=None, **kwargs): """ Save dataframe to a CSV file. Parameters ---------- df : pandas.DataFrame dataframe to save path : str path to directory where to save CSV file filename : str filename of CSV file comment : str header comment(s); one or more lines starting with '#' **kwargs additional parameters to `pandas.DataFrame.to_csv` Returns ------- str path to saved file, else empty str """ if isinstance(df, pd.DataFrame) and len(df) > 0: try: if not create_dir(path): return "" destination = os.path.join(path, filename) print("Saving " + os.path.relpath(destination)) s = ( "# Generated by lineage v{}, https://github.com/apriha/lineage\n" "# Generated at {} UTC\n" ) s = s.format( __version__, datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") ) if isinstance(comment, str): s += comment with open(destination, "w") as f: f.write(s) # https://stackoverflow.com/a/29233924/4727627 with open(destination, "a") as f: df.to_csv(f, na_rep="--", **kwargs) return destination except Exception as err: print(err) return "" else: print("no data to save...") return ""
python
def save_df_as_csv(df, path, filename, comment=None, **kwargs): """ Save dataframe to a CSV file. Parameters ---------- df : pandas.DataFrame dataframe to save path : str path to directory where to save CSV file filename : str filename of CSV file comment : str header comment(s); one or more lines starting with '#' **kwargs additional parameters to `pandas.DataFrame.to_csv` Returns ------- str path to saved file, else empty str """ if isinstance(df, pd.DataFrame) and len(df) > 0: try: if not create_dir(path): return "" destination = os.path.join(path, filename) print("Saving " + os.path.relpath(destination)) s = ( "# Generated by lineage v{}, https://github.com/apriha/lineage\n" "# Generated at {} UTC\n" ) s = s.format( __version__, datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") ) if isinstance(comment, str): s += comment with open(destination, "w") as f: f.write(s) # https://stackoverflow.com/a/29233924/4727627 with open(destination, "a") as f: df.to_csv(f, na_rep="--", **kwargs) return destination except Exception as err: print(err) return "" else: print("no data to save...") return ""
[ "def", "save_df_as_csv", "(", "df", ",", "path", ",", "filename", ",", "comment", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "df", ",", "pd", ".", "DataFrame", ")", "and", "len", "(", "df", ")", ">", "0", ":", "try", ":", "if", "not", "create_dir", "(", "path", ")", ":", "return", "\"\"", "destination", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "print", "(", "\"Saving \"", "+", "os", ".", "path", ".", "relpath", "(", "destination", ")", ")", "s", "=", "(", "\"# Generated by lineage v{}, https://github.com/apriha/lineage\\n\"", "\"# Generated at {} UTC\\n\"", ")", "s", "=", "s", ".", "format", "(", "__version__", ",", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", ")", "if", "isinstance", "(", "comment", ",", "str", ")", ":", "s", "+=", "comment", "with", "open", "(", "destination", ",", "\"w\"", ")", "as", "f", ":", "f", ".", "write", "(", "s", ")", "# https://stackoverflow.com/a/29233924/4727627", "with", "open", "(", "destination", ",", "\"a\"", ")", "as", "f", ":", "df", ".", "to_csv", "(", "f", ",", "na_rep", "=", "\"--\"", ",", "*", "*", "kwargs", ")", "return", "destination", "except", "Exception", "as", "err", ":", "print", "(", "err", ")", "return", "\"\"", "else", ":", "print", "(", "\"no data to save...\"", ")", "return", "\"\"" ]
Save dataframe to a CSV file. Parameters ---------- df : pandas.DataFrame dataframe to save path : str path to directory where to save CSV file filename : str filename of CSV file comment : str header comment(s); one or more lines starting with '#' **kwargs additional parameters to `pandas.DataFrame.to_csv` Returns ------- str path to saved file, else empty str
[ "Save", "dataframe", "to", "a", "CSV", "file", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/__init__.py#L885-L940
5,579
apriha/lineage
src/lineage/resources.py
Resources.get_genetic_map_HapMapII_GRCh37
def get_genetic_map_HapMapII_GRCh37(self): """ Get International HapMap Consortium HapMap Phase II genetic map for Build 37. Returns ------- dict dict of pandas.DataFrame HapMapII genetic maps if loading was successful, else None """ if self._genetic_map_HapMapII_GRCh37 is None: self._genetic_map_HapMapII_GRCh37 = self._load_genetic_map( self._get_path_genetic_map_HapMapII_GRCh37() ) return self._genetic_map_HapMapII_GRCh37
python
def get_genetic_map_HapMapII_GRCh37(self): """ Get International HapMap Consortium HapMap Phase II genetic map for Build 37. Returns ------- dict dict of pandas.DataFrame HapMapII genetic maps if loading was successful, else None """ if self._genetic_map_HapMapII_GRCh37 is None: self._genetic_map_HapMapII_GRCh37 = self._load_genetic_map( self._get_path_genetic_map_HapMapII_GRCh37() ) return self._genetic_map_HapMapII_GRCh37
[ "def", "get_genetic_map_HapMapII_GRCh37", "(", "self", ")", ":", "if", "self", ".", "_genetic_map_HapMapII_GRCh37", "is", "None", ":", "self", ".", "_genetic_map_HapMapII_GRCh37", "=", "self", ".", "_load_genetic_map", "(", "self", ".", "_get_path_genetic_map_HapMapII_GRCh37", "(", ")", ")", "return", "self", ".", "_genetic_map_HapMapII_GRCh37" ]
Get International HapMap Consortium HapMap Phase II genetic map for Build 37. Returns ------- dict dict of pandas.DataFrame HapMapII genetic maps if loading was successful, else None
[ "Get", "International", "HapMap", "Consortium", "HapMap", "Phase", "II", "genetic", "map", "for", "Build", "37", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L83-L96
5,580
apriha/lineage
src/lineage/resources.py
Resources.get_cytoBand_hg19
def get_cytoBand_hg19(self): """ Get UCSC cytoBand table for Build 37. Returns ------- pandas.DataFrame cytoBand table if loading was successful, else None """ if self._cytoBand_hg19 is None: self._cytoBand_hg19 = self._load_cytoBand(self._get_path_cytoBand_hg19()) return self._cytoBand_hg19
python
def get_cytoBand_hg19(self): """ Get UCSC cytoBand table for Build 37. Returns ------- pandas.DataFrame cytoBand table if loading was successful, else None """ if self._cytoBand_hg19 is None: self._cytoBand_hg19 = self._load_cytoBand(self._get_path_cytoBand_hg19()) return self._cytoBand_hg19
[ "def", "get_cytoBand_hg19", "(", "self", ")", ":", "if", "self", ".", "_cytoBand_hg19", "is", "None", ":", "self", ".", "_cytoBand_hg19", "=", "self", ".", "_load_cytoBand", "(", "self", ".", "_get_path_cytoBand_hg19", "(", ")", ")", "return", "self", ".", "_cytoBand_hg19" ]
Get UCSC cytoBand table for Build 37. Returns ------- pandas.DataFrame cytoBand table if loading was successful, else None
[ "Get", "UCSC", "cytoBand", "table", "for", "Build", "37", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L98-L109
5,581
apriha/lineage
src/lineage/resources.py
Resources.get_knownGene_hg19
def get_knownGene_hg19(self): """ Get UCSC knownGene table for Build 37. Returns ------- pandas.DataFrame knownGene table if loading was successful, else None """ if self._knownGene_hg19 is None: self._knownGene_hg19 = self._load_knownGene(self._get_path_knownGene_hg19()) return self._knownGene_hg19
python
def get_knownGene_hg19(self): """ Get UCSC knownGene table for Build 37. Returns ------- pandas.DataFrame knownGene table if loading was successful, else None """ if self._knownGene_hg19 is None: self._knownGene_hg19 = self._load_knownGene(self._get_path_knownGene_hg19()) return self._knownGene_hg19
[ "def", "get_knownGene_hg19", "(", "self", ")", ":", "if", "self", ".", "_knownGene_hg19", "is", "None", ":", "self", ".", "_knownGene_hg19", "=", "self", ".", "_load_knownGene", "(", "self", ".", "_get_path_knownGene_hg19", "(", ")", ")", "return", "self", ".", "_knownGene_hg19" ]
Get UCSC knownGene table for Build 37. Returns ------- pandas.DataFrame knownGene table if loading was successful, else None
[ "Get", "UCSC", "knownGene", "table", "for", "Build", "37", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L111-L122
5,582
apriha/lineage
src/lineage/resources.py
Resources.get_kgXref_hg19
def get_kgXref_hg19(self): """ Get UCSC kgXref table for Build 37. Returns ------- pandas.DataFrame kgXref table if loading was successful, else None """ if self._kgXref_hg19 is None: self._kgXref_hg19 = self._load_kgXref(self._get_path_kgXref_hg19()) return self._kgXref_hg19
python
def get_kgXref_hg19(self): """ Get UCSC kgXref table for Build 37. Returns ------- pandas.DataFrame kgXref table if loading was successful, else None """ if self._kgXref_hg19 is None: self._kgXref_hg19 = self._load_kgXref(self._get_path_kgXref_hg19()) return self._kgXref_hg19
[ "def", "get_kgXref_hg19", "(", "self", ")", ":", "if", "self", ".", "_kgXref_hg19", "is", "None", ":", "self", ".", "_kgXref_hg19", "=", "self", ".", "_load_kgXref", "(", "self", ".", "_get_path_kgXref_hg19", "(", ")", ")", "return", "self", ".", "_kgXref_hg19" ]
Get UCSC kgXref table for Build 37. Returns ------- pandas.DataFrame kgXref table if loading was successful, else None
[ "Get", "UCSC", "kgXref", "table", "for", "Build", "37", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L124-L135
5,583
apriha/lineage
src/lineage/resources.py
Resources.get_assembly_mapping_data
def get_assembly_mapping_data(self, source_assembly, target_assembly): """ Get assembly mapping data. Parameters ---------- source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap from target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap to Returns ------- dict dict of json assembly mapping data if loading was successful, else None """ return self._load_assembly_mapping_data( self._get_path_assembly_mapping_data(source_assembly, target_assembly) )
python
def get_assembly_mapping_data(self, source_assembly, target_assembly): """ Get assembly mapping data. Parameters ---------- source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap from target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap to Returns ------- dict dict of json assembly mapping data if loading was successful, else None """ return self._load_assembly_mapping_data( self._get_path_assembly_mapping_data(source_assembly, target_assembly) )
[ "def", "get_assembly_mapping_data", "(", "self", ",", "source_assembly", ",", "target_assembly", ")", ":", "return", "self", ".", "_load_assembly_mapping_data", "(", "self", ".", "_get_path_assembly_mapping_data", "(", "source_assembly", ",", "target_assembly", ")", ")" ]
Get assembly mapping data. Parameters ---------- source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap from target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap to Returns ------- dict dict of json assembly mapping data if loading was successful, else None
[ "Get", "assembly", "mapping", "data", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L137-L154
5,584
apriha/lineage
src/lineage/resources.py
Resources._load_assembly_mapping_data
def _load_assembly_mapping_data(filename): """ Load assembly mapping data. Parameters ---------- filename : str path to compressed archive with assembly mapping data Returns ------- assembly_mapping_data : dict dict of assembly maps if loading was successful, else None Notes ----- Keys of returned dict are chromosomes and values are the corresponding assembly map. """ try: assembly_mapping_data = {} with tarfile.open(filename, "r") as tar: # http://stackoverflow.com/a/2018576 for member in tar.getmembers(): if ".json" in member.name: with tar.extractfile(member) as tar_file: tar_bytes = tar_file.read() # https://stackoverflow.com/a/42683509/4727627 assembly_mapping_data[member.name.split(".")[0]] = json.loads( tar_bytes.decode("utf-8") ) return assembly_mapping_data except Exception as err: print(err) return None
python
def _load_assembly_mapping_data(filename): """ Load assembly mapping data. Parameters ---------- filename : str path to compressed archive with assembly mapping data Returns ------- assembly_mapping_data : dict dict of assembly maps if loading was successful, else None Notes ----- Keys of returned dict are chromosomes and values are the corresponding assembly map. """ try: assembly_mapping_data = {} with tarfile.open(filename, "r") as tar: # http://stackoverflow.com/a/2018576 for member in tar.getmembers(): if ".json" in member.name: with tar.extractfile(member) as tar_file: tar_bytes = tar_file.read() # https://stackoverflow.com/a/42683509/4727627 assembly_mapping_data[member.name.split(".")[0]] = json.loads( tar_bytes.decode("utf-8") ) return assembly_mapping_data except Exception as err: print(err) return None
[ "def", "_load_assembly_mapping_data", "(", "filename", ")", ":", "try", ":", "assembly_mapping_data", "=", "{", "}", "with", "tarfile", ".", "open", "(", "filename", ",", "\"r\"", ")", "as", "tar", ":", "# http://stackoverflow.com/a/2018576", "for", "member", "in", "tar", ".", "getmembers", "(", ")", ":", "if", "\".json\"", "in", "member", ".", "name", ":", "with", "tar", ".", "extractfile", "(", "member", ")", "as", "tar_file", ":", "tar_bytes", "=", "tar_file", ".", "read", "(", ")", "# https://stackoverflow.com/a/42683509/4727627", "assembly_mapping_data", "[", "member", ".", "name", ".", "split", "(", "\".\"", ")", "[", "0", "]", "]", "=", "json", ".", "loads", "(", "tar_bytes", ".", "decode", "(", "\"utf-8\"", ")", ")", "return", "assembly_mapping_data", "except", "Exception", "as", "err", ":", "print", "(", "err", ")", "return", "None" ]
Load assembly mapping data. Parameters ---------- filename : str path to compressed archive with assembly mapping data Returns ------- assembly_mapping_data : dict dict of assembly maps if loading was successful, else None Notes ----- Keys of returned dict are chromosomes and values are the corresponding assembly map.
[ "Load", "assembly", "mapping", "data", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L312-L346
5,585
apriha/lineage
src/lineage/resources.py
Resources._load_cytoBand
def _load_cytoBand(filename): """ Load UCSC cytoBand table. Parameters ---------- filename : str path to cytoBand file Returns ------- df : pandas.DataFrame cytoBand table if loading was successful, else None References ---------- ..[1] Ryan Dale, GitHub Gist, https://gist.github.com/daler/c98fc410282d7570efc3#file-ideograms-py """ try: # adapted from chromosome plotting code (see [1]_) df = pd.read_table( filename, names=["chrom", "start", "end", "name", "gie_stain"] ) df["chrom"] = df["chrom"].str[3:] return df except Exception as err: print(err) return None
python
def _load_cytoBand(filename): """ Load UCSC cytoBand table. Parameters ---------- filename : str path to cytoBand file Returns ------- df : pandas.DataFrame cytoBand table if loading was successful, else None References ---------- ..[1] Ryan Dale, GitHub Gist, https://gist.github.com/daler/c98fc410282d7570efc3#file-ideograms-py """ try: # adapted from chromosome plotting code (see [1]_) df = pd.read_table( filename, names=["chrom", "start", "end", "name", "gie_stain"] ) df["chrom"] = df["chrom"].str[3:] return df except Exception as err: print(err) return None
[ "def", "_load_cytoBand", "(", "filename", ")", ":", "try", ":", "# adapted from chromosome plotting code (see [1]_)", "df", "=", "pd", ".", "read_table", "(", "filename", ",", "names", "=", "[", "\"chrom\"", ",", "\"start\"", ",", "\"end\"", ",", "\"name\"", ",", "\"gie_stain\"", "]", ")", "df", "[", "\"chrom\"", "]", "=", "df", "[", "\"chrom\"", "]", ".", "str", "[", "3", ":", "]", "return", "df", "except", "Exception", "as", "err", ":", "print", "(", "err", ")", "return", "None" ]
Load UCSC cytoBand table. Parameters ---------- filename : str path to cytoBand file Returns ------- df : pandas.DataFrame cytoBand table if loading was successful, else None References ---------- ..[1] Ryan Dale, GitHub Gist, https://gist.github.com/daler/c98fc410282d7570efc3#file-ideograms-py
[ "Load", "UCSC", "cytoBand", "table", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L349-L376
5,586
apriha/lineage
src/lineage/resources.py
Resources._load_knownGene
def _load_knownGene(filename): """ Load UCSC knownGene table. Parameters ---------- filename : str path to knownGene file Returns ------- df : pandas.DataFrame knownGene table if loading was successful, else None """ try: df = pd.read_table( filename, names=[ "name", "chrom", "strand", "txStart", "txEnd", "cdsStart", "cdsEnd", "exonCount", "exonStarts", "exonEnds", "proteinID", "alignID", ], index_col=0, ) df["chrom"] = df["chrom"].str[3:] return df except Exception as err: print(err) return None
python
def _load_knownGene(filename): """ Load UCSC knownGene table. Parameters ---------- filename : str path to knownGene file Returns ------- df : pandas.DataFrame knownGene table if loading was successful, else None """ try: df = pd.read_table( filename, names=[ "name", "chrom", "strand", "txStart", "txEnd", "cdsStart", "cdsEnd", "exonCount", "exonStarts", "exonEnds", "proteinID", "alignID", ], index_col=0, ) df["chrom"] = df["chrom"].str[3:] return df except Exception as err: print(err) return None
[ "def", "_load_knownGene", "(", "filename", ")", ":", "try", ":", "df", "=", "pd", ".", "read_table", "(", "filename", ",", "names", "=", "[", "\"name\"", ",", "\"chrom\"", ",", "\"strand\"", ",", "\"txStart\"", ",", "\"txEnd\"", ",", "\"cdsStart\"", ",", "\"cdsEnd\"", ",", "\"exonCount\"", ",", "\"exonStarts\"", ",", "\"exonEnds\"", ",", "\"proteinID\"", ",", "\"alignID\"", ",", "]", ",", "index_col", "=", "0", ",", ")", "df", "[", "\"chrom\"", "]", "=", "df", "[", "\"chrom\"", "]", ".", "str", "[", "3", ":", "]", "return", "df", "except", "Exception", "as", "err", ":", "print", "(", "err", ")", "return", "None" ]
Load UCSC knownGene table. Parameters ---------- filename : str path to knownGene file Returns ------- df : pandas.DataFrame knownGene table if loading was successful, else None
[ "Load", "UCSC", "knownGene", "table", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L379-L415
5,587
apriha/lineage
src/lineage/resources.py
Resources._load_kgXref
def _load_kgXref(filename): """ Load UCSC kgXref table. Parameters ---------- filename : str path to kgXref file Returns ------- df : pandas.DataFrame kgXref table if loading was successful, else None """ try: df = pd.read_table( filename, names=[ "kgID", "mRNA", "spID", "spDisplayID", "geneSymbol", "refseq", "protAcc", "description", "rfamAcc", "tRnaName", ], index_col=0, dtype=object, ) return df except Exception as err: print(err) return None
python
def _load_kgXref(filename): """ Load UCSC kgXref table. Parameters ---------- filename : str path to kgXref file Returns ------- df : pandas.DataFrame kgXref table if loading was successful, else None """ try: df = pd.read_table( filename, names=[ "kgID", "mRNA", "spID", "spDisplayID", "geneSymbol", "refseq", "protAcc", "description", "rfamAcc", "tRnaName", ], index_col=0, dtype=object, ) return df except Exception as err: print(err) return None
[ "def", "_load_kgXref", "(", "filename", ")", ":", "try", ":", "df", "=", "pd", ".", "read_table", "(", "filename", ",", "names", "=", "[", "\"kgID\"", ",", "\"mRNA\"", ",", "\"spID\"", ",", "\"spDisplayID\"", ",", "\"geneSymbol\"", ",", "\"refseq\"", ",", "\"protAcc\"", ",", "\"description\"", ",", "\"rfamAcc\"", ",", "\"tRnaName\"", ",", "]", ",", "index_col", "=", "0", ",", "dtype", "=", "object", ",", ")", "return", "df", "except", "Exception", "as", "err", ":", "print", "(", "err", ")", "return", "None" ]
Load UCSC kgXref table. Parameters ---------- filename : str path to kgXref file Returns ------- df : pandas.DataFrame kgXref table if loading was successful, else None
[ "Load", "UCSC", "kgXref", "table", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L418-L452
5,588
apriha/lineage
src/lineage/resources.py
Resources._get_path_assembly_mapping_data
def _get_path_assembly_mapping_data( self, source_assembly, target_assembly, retries=10 ): """ Get local path to assembly mapping data, downloading if necessary. Parameters ---------- source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap from target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap to retries : int number of retries per chromosome to download assembly mapping data Returns ------- str path to <source_assembly>_<target_assembly>.tar.gz References ---------- ..[1] Ensembl, Assembly Information Endpoint, https://rest.ensembl.org/documentation/info/assembly_info ..[2] Ensembl, Assembly Map Endpoint, http://rest.ensembl.org/documentation/info/assembly_map """ if not lineage.create_dir(self._resources_dir): return None chroms = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "X", "Y", "MT", ] assembly_mapping_data = source_assembly + "_" + target_assembly destination = os.path.join( self._resources_dir, assembly_mapping_data + ".tar.gz" ) if not os.path.exists(destination) or not self._all_chroms_in_tar( chroms, destination ): print("Downloading {}".format(os.path.relpath(destination))) try: with tarfile.open(destination, "w:gz") as out_tar: for chrom in chroms: file = chrom + ".json" map_endpoint = ( "/map/human/" + source_assembly + "/" + chrom + "/" + target_assembly + "?" ) # get assembly mapping data response = None retry = 0 while response is None and retry < retries: response = self._ensembl_rest_client.perform_rest_action( map_endpoint ) retry += 1 if response is not None: # open temp file, save json response to file, close temp file with tempfile.NamedTemporaryFile( delete=False, mode="w" ) as f: json.dump(response, f) # add temp file to archive out_tar.add(f.name, arcname=file) # remove temp file os.remove(f.name) except Exception as err: print(err) return None return destination
python
def _get_path_assembly_mapping_data( self, source_assembly, target_assembly, retries=10 ): """ Get local path to assembly mapping data, downloading if necessary. Parameters ---------- source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap from target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap to retries : int number of retries per chromosome to download assembly mapping data Returns ------- str path to <source_assembly>_<target_assembly>.tar.gz References ---------- ..[1] Ensembl, Assembly Information Endpoint, https://rest.ensembl.org/documentation/info/assembly_info ..[2] Ensembl, Assembly Map Endpoint, http://rest.ensembl.org/documentation/info/assembly_map """ if not lineage.create_dir(self._resources_dir): return None chroms = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "X", "Y", "MT", ] assembly_mapping_data = source_assembly + "_" + target_assembly destination = os.path.join( self._resources_dir, assembly_mapping_data + ".tar.gz" ) if not os.path.exists(destination) or not self._all_chroms_in_tar( chroms, destination ): print("Downloading {}".format(os.path.relpath(destination))) try: with tarfile.open(destination, "w:gz") as out_tar: for chrom in chroms: file = chrom + ".json" map_endpoint = ( "/map/human/" + source_assembly + "/" + chrom + "/" + target_assembly + "?" ) # get assembly mapping data response = None retry = 0 while response is None and retry < retries: response = self._ensembl_rest_client.perform_rest_action( map_endpoint ) retry += 1 if response is not None: # open temp file, save json response to file, close temp file with tempfile.NamedTemporaryFile( delete=False, mode="w" ) as f: json.dump(response, f) # add temp file to archive out_tar.add(f.name, arcname=file) # remove temp file os.remove(f.name) except Exception as err: print(err) return None return destination
[ "def", "_get_path_assembly_mapping_data", "(", "self", ",", "source_assembly", ",", "target_assembly", ",", "retries", "=", "10", ")", ":", "if", "not", "lineage", ".", "create_dir", "(", "self", ".", "_resources_dir", ")", ":", "return", "None", "chroms", "=", "[", "\"1\"", ",", "\"2\"", ",", "\"3\"", ",", "\"4\"", ",", "\"5\"", ",", "\"6\"", ",", "\"7\"", ",", "\"8\"", ",", "\"9\"", ",", "\"10\"", ",", "\"11\"", ",", "\"12\"", ",", "\"13\"", ",", "\"14\"", ",", "\"15\"", ",", "\"16\"", ",", "\"17\"", ",", "\"18\"", ",", "\"19\"", ",", "\"20\"", ",", "\"21\"", ",", "\"22\"", ",", "\"X\"", ",", "\"Y\"", ",", "\"MT\"", ",", "]", "assembly_mapping_data", "=", "source_assembly", "+", "\"_\"", "+", "target_assembly", "destination", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_resources_dir", ",", "assembly_mapping_data", "+", "\".tar.gz\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "destination", ")", "or", "not", "self", ".", "_all_chroms_in_tar", "(", "chroms", ",", "destination", ")", ":", "print", "(", "\"Downloading {}\"", ".", "format", "(", "os", ".", "path", ".", "relpath", "(", "destination", ")", ")", ")", "try", ":", "with", "tarfile", ".", "open", "(", "destination", ",", "\"w:gz\"", ")", "as", "out_tar", ":", "for", "chrom", "in", "chroms", ":", "file", "=", "chrom", "+", "\".json\"", "map_endpoint", "=", "(", "\"/map/human/\"", "+", "source_assembly", "+", "\"/\"", "+", "chrom", "+", "\"/\"", "+", "target_assembly", "+", "\"?\"", ")", "# get assembly mapping data", "response", "=", "None", "retry", "=", "0", "while", "response", "is", "None", "and", "retry", "<", "retries", ":", "response", "=", "self", ".", "_ensembl_rest_client", ".", "perform_rest_action", "(", "map_endpoint", ")", "retry", "+=", "1", "if", "response", "is", "not", "None", ":", "# open temp file, save json response to file, close temp file", "with", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ",", "mode", "=", "\"w\"", ")", "as", "f", ":", "json", ".", "dump", "(", "response", ",", "f", ")", "# add temp file to archive", "out_tar", ".", "add", "(", "f", ".", "name", ",", "arcname", "=", "file", ")", "# remove temp file", "os", ".", "remove", "(", "f", ".", "name", ")", "except", "Exception", "as", "err", ":", "print", "(", "err", ")", "return", "None", "return", "destination" ]
Get local path to assembly mapping data, downloading if necessary. Parameters ---------- source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap from target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap to retries : int number of retries per chromosome to download assembly mapping data Returns ------- str path to <source_assembly>_<target_assembly>.tar.gz References ---------- ..[1] Ensembl, Assembly Information Endpoint, https://rest.ensembl.org/documentation/info/assembly_info ..[2] Ensembl, Assembly Map Endpoint, http://rest.ensembl.org/documentation/info/assembly_map
[ "Get", "local", "path", "to", "assembly", "mapping", "data", "downloading", "if", "necessary", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L517-L626
5,589
apriha/lineage
src/lineage/resources.py
Resources._download_file
def _download_file(self, url, filename, compress=False, timeout=30): """ Download a file to the resources folder. Download data from `url`, save as `filename`, and optionally compress with gzip. Parameters ---------- url : str URL to download data from filename : str name of file to save; if compress, ensure '.gz' is appended compress : bool compress with gzip timeout : int seconds for timeout of download request Returns ------- str path to downloaded file, None if error """ if not lineage.create_dir(self._resources_dir): return None if compress and filename[-3:] != ".gz": filename += ".gz" destination = os.path.join(self._resources_dir, filename) if not os.path.exists(destination): try: if compress: open_func = gzip.open else: open_func = open # get file if it hasn't already been downloaded # http://stackoverflow.com/a/7244263 with urllib.request.urlopen( url, timeout=timeout ) as response, open_func(destination, "wb") as f: self._print_download_msg(destination) data = response.read() # a `bytes` object f.write(data) except urllib.error.URLError as err: print(err) destination = None # try HTTP if an FTP error occurred if "ftp://" in url: destination = self._download_file( url.replace("ftp://", "http://"), filename, compress=compress, timeout=timeout, ) except Exception as err: print(err) return None return destination
python
def _download_file(self, url, filename, compress=False, timeout=30): """ Download a file to the resources folder. Download data from `url`, save as `filename`, and optionally compress with gzip. Parameters ---------- url : str URL to download data from filename : str name of file to save; if compress, ensure '.gz' is appended compress : bool compress with gzip timeout : int seconds for timeout of download request Returns ------- str path to downloaded file, None if error """ if not lineage.create_dir(self._resources_dir): return None if compress and filename[-3:] != ".gz": filename += ".gz" destination = os.path.join(self._resources_dir, filename) if not os.path.exists(destination): try: if compress: open_func = gzip.open else: open_func = open # get file if it hasn't already been downloaded # http://stackoverflow.com/a/7244263 with urllib.request.urlopen( url, timeout=timeout ) as response, open_func(destination, "wb") as f: self._print_download_msg(destination) data = response.read() # a `bytes` object f.write(data) except urllib.error.URLError as err: print(err) destination = None # try HTTP if an FTP error occurred if "ftp://" in url: destination = self._download_file( url.replace("ftp://", "http://"), filename, compress=compress, timeout=timeout, ) except Exception as err: print(err) return None return destination
[ "def", "_download_file", "(", "self", ",", "url", ",", "filename", ",", "compress", "=", "False", ",", "timeout", "=", "30", ")", ":", "if", "not", "lineage", ".", "create_dir", "(", "self", ".", "_resources_dir", ")", ":", "return", "None", "if", "compress", "and", "filename", "[", "-", "3", ":", "]", "!=", "\".gz\"", ":", "filename", "+=", "\".gz\"", "destination", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_resources_dir", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "destination", ")", ":", "try", ":", "if", "compress", ":", "open_func", "=", "gzip", ".", "open", "else", ":", "open_func", "=", "open", "# get file if it hasn't already been downloaded", "# http://stackoverflow.com/a/7244263", "with", "urllib", ".", "request", ".", "urlopen", "(", "url", ",", "timeout", "=", "timeout", ")", "as", "response", ",", "open_func", "(", "destination", ",", "\"wb\"", ")", "as", "f", ":", "self", ".", "_print_download_msg", "(", "destination", ")", "data", "=", "response", ".", "read", "(", ")", "# a `bytes` object", "f", ".", "write", "(", "data", ")", "except", "urllib", ".", "error", ".", "URLError", "as", "err", ":", "print", "(", "err", ")", "destination", "=", "None", "# try HTTP if an FTP error occurred", "if", "\"ftp://\"", "in", "url", ":", "destination", "=", "self", ".", "_download_file", "(", "url", ".", "replace", "(", "\"ftp://\"", ",", "\"http://\"", ")", ",", "filename", ",", "compress", "=", "compress", ",", "timeout", "=", "timeout", ",", ")", "except", "Exception", "as", "err", ":", "print", "(", "err", ")", "return", "None", "return", "destination" ]
Download a file to the resources folder. Download data from `url`, save as `filename`, and optionally compress with gzip. Parameters ---------- url : str URL to download data from filename : str name of file to save; if compress, ensure '.gz' is appended compress : bool compress with gzip timeout : int seconds for timeout of download request Returns ------- str path to downloaded file, None if error
[ "Download", "a", "file", "to", "the", "resources", "folder", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L642-L701
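A standalone sketch of the same download-and-optionally-compress pattern used by `_download_file` above, stripped of the `Resources` plumbing (`_resources_dir`, `_print_download_msg`, `lineage.create_dir`); the destination path is illustrative.

import gzip
import os
import urllib.request

def download(url, destination, compress=False, timeout=30):
    # Skip the request entirely if the file is already on disk.
    if os.path.exists(destination):
        return destination
    # gzip.open transparently compresses the bytes as they are written,
    # so the same write call covers both the plain and compressed cases.
    open_func = gzip.open if compress else open
    with urllib.request.urlopen(url, timeout=timeout) as response, \
            open_func(destination, "wb") as f:
        f.write(response.read())
    return destination

As in the method above, the only difference between the two modes is which `open` is used; the error handling and FTP-to-HTTP fallback are omitted here.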
5,590
apriha/lineage
src/lineage/individual.py
Individual.load_snps
def load_snps( self, raw_data, discrepant_snp_positions_threshold=100, discrepant_genotypes_threshold=500, save_output=False, ): """ Load raw genotype data. Parameters ---------- raw_data : list or str path(s) to file(s) with raw genotype data discrepant_snp_positions_threshold : int threshold for discrepant SNP positions between existing data and data to be loaded, a large value could indicate mismatched genome assemblies discrepant_genotypes_threshold : int threshold for discrepant genotype data between existing data and data to be loaded, a large value could indicated mismatched individuals save_output : bool specifies whether to save discrepant SNP output to CSV files in the output directory """ if type(raw_data) is list: for file in raw_data: self._load_snps_helper( file, discrepant_snp_positions_threshold, discrepant_genotypes_threshold, save_output, ) elif type(raw_data) is str: self._load_snps_helper( raw_data, discrepant_snp_positions_threshold, discrepant_genotypes_threshold, save_output, ) else: raise TypeError("invalid filetype")
python
def load_snps( self, raw_data, discrepant_snp_positions_threshold=100, discrepant_genotypes_threshold=500, save_output=False, ): """ Load raw genotype data. Parameters ---------- raw_data : list or str path(s) to file(s) with raw genotype data discrepant_snp_positions_threshold : int threshold for discrepant SNP positions between existing data and data to be loaded, a large value could indicate mismatched genome assemblies discrepant_genotypes_threshold : int threshold for discrepant genotype data between existing data and data to be loaded, a large value could indicated mismatched individuals save_output : bool specifies whether to save discrepant SNP output to CSV files in the output directory """ if type(raw_data) is list: for file in raw_data: self._load_snps_helper( file, discrepant_snp_positions_threshold, discrepant_genotypes_threshold, save_output, ) elif type(raw_data) is str: self._load_snps_helper( raw_data, discrepant_snp_positions_threshold, discrepant_genotypes_threshold, save_output, ) else: raise TypeError("invalid filetype")
[ "def", "load_snps", "(", "self", ",", "raw_data", ",", "discrepant_snp_positions_threshold", "=", "100", ",", "discrepant_genotypes_threshold", "=", "500", ",", "save_output", "=", "False", ",", ")", ":", "if", "type", "(", "raw_data", ")", "is", "list", ":", "for", "file", "in", "raw_data", ":", "self", ".", "_load_snps_helper", "(", "file", ",", "discrepant_snp_positions_threshold", ",", "discrepant_genotypes_threshold", ",", "save_output", ",", ")", "elif", "type", "(", "raw_data", ")", "is", "str", ":", "self", ".", "_load_snps_helper", "(", "raw_data", ",", "discrepant_snp_positions_threshold", ",", "discrepant_genotypes_threshold", ",", "save_output", ",", ")", "else", ":", "raise", "TypeError", "(", "\"invalid filetype\"", ")" ]
Load raw genotype data.

        Parameters
        ----------
        raw_data : list or str
            path(s) to file(s) with raw genotype data
        discrepant_snp_positions_threshold : int
            threshold for discrepant SNP positions between existing data and data to be loaded,
            a large value could indicate mismatched genome assemblies
        discrepant_genotypes_threshold : int
            threshold for discrepant genotype data between existing data and data to be loaded,
            a large value could indicate mismatched individuals
        save_output : bool
            specifies whether to save discrepant SNP output to CSV files in the output directory
[ "Load", "raw", "genotype", "data", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/individual.py#L205-L243
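A hedged usage sketch for `load_snps`: the file paths are placeholders, and it assumes an `Individual` instance `ind` has already been created (its constructor is outside this record).

# Merge two raw genotype files into one Individual, tightening the
# discrepancy thresholds and saving any discrepant SNPs to CSV.
ind.load_snps(
    ["resources/sample_23andme.txt", "resources/sample_ancestry.txt"],
    discrepant_snp_positions_threshold=50,
    discrepant_genotypes_threshold=300,
    save_output=True,
)

# A single path is also accepted; any other type raises TypeError.
ind.load_snps("resources/sample_ftdna.csv")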
5,591
apriha/lineage
src/lineage/individual.py
Individual.save_snps
def save_snps(self, filename=None): """ Save SNPs to file. Parameters ---------- filename : str filename for file to save Returns ------- str path to file in output directory if SNPs were saved, else empty str """ comment = ( "# Source(s): {}\n" "# Assembly: {}\n" "# SNPs: {}\n" "# Chromosomes: {}\n".format( self.source, self.assembly, self.snp_count, self.chromosomes_summary ) ) if filename is None: filename = self.get_var_name() + "_lineage_" + self.assembly + ".csv" return lineage.save_df_as_csv( self._snps, self._output_dir, filename, comment=comment, header=["chromosome", "position", "genotype"], )
python
def save_snps(self, filename=None): """ Save SNPs to file. Parameters ---------- filename : str filename for file to save Returns ------- str path to file in output directory if SNPs were saved, else empty str """ comment = ( "# Source(s): {}\n" "# Assembly: {}\n" "# SNPs: {}\n" "# Chromosomes: {}\n".format( self.source, self.assembly, self.snp_count, self.chromosomes_summary ) ) if filename is None: filename = self.get_var_name() + "_lineage_" + self.assembly + ".csv" return lineage.save_df_as_csv( self._snps, self._output_dir, filename, comment=comment, header=["chromosome", "position", "genotype"], )
[ "def", "save_snps", "(", "self", ",", "filename", "=", "None", ")", ":", "comment", "=", "(", "\"# Source(s): {}\\n\"", "\"# Assembly: {}\\n\"", "\"# SNPs: {}\\n\"", "\"# Chromosomes: {}\\n\"", ".", "format", "(", "self", ".", "source", ",", "self", ".", "assembly", ",", "self", ".", "snp_count", ",", "self", ".", "chromosomes_summary", ")", ")", "if", "filename", "is", "None", ":", "filename", "=", "self", ".", "get_var_name", "(", ")", "+", "\"_lineage_\"", "+", "self", ".", "assembly", "+", "\".csv\"", "return", "lineage", ".", "save_df_as_csv", "(", "self", ".", "_snps", ",", "self", ".", "_output_dir", ",", "filename", ",", "comment", "=", "comment", ",", "header", "=", "[", "\"chromosome\"", ",", "\"position\"", ",", "\"genotype\"", "]", ",", ")" ]
Save SNPs to file. Parameters ---------- filename : str filename for file to save Returns ------- str path to file in output directory if SNPs were saved, else empty str
[ "Save", "SNPs", "to", "file", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/individual.py#L267-L298
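`save_snps` delegates to `lineage.save_df_as_csv`, which is not shown in this record; the sketch below reproduces the apparent output format (commented metadata lines followed by a header row) with plain pandas and made-up values.

import pandas as pd

snps = pd.DataFrame(
    {"chrom": ["1", "1"], "pos": [101, 202], "genotype": ["AA", "CT"]},
    index=pd.Index(["rs1", "rs2"], name="rsid"),
)
comment = "# Source(s): 23andMe\n# Assembly: GRCh37\n# SNPs: 2\n# Chromosomes: 1\n"

# Write the '#'-prefixed metadata first, then the CSV body with renamed columns.
with open("sample_lineage_GRCh37.csv", "w") as f:
    f.write(comment)
    snps.to_csv(f, header=["chromosome", "position", "genotype"])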
5,592
apriha/lineage
src/lineage/individual.py
Individual.remap_snps
def remap_snps(self, target_assembly, complement_bases=True): """ Remap the SNP coordinates of this ``Individual`` from one assembly to another. This method is a wrapper for `remap_snps` in the ``Lineage`` class. This method uses the assembly map endpoint of the Ensembl REST API service to convert SNP coordinates / positions from one assembly to another. After remapping, the coordinates / positions for the ``Individual``'s SNPs will be that of the target assembly. If the SNPs are already mapped relative to the target assembly, remapping will not be performed. Parameters ---------- target_assembly : {'NCBI36', 'GRCh37', 'GRCh38', 36, 37, 38} assembly to remap to complement_bases : bool complement bases when remapping SNPs to the minus strand Returns ------- chromosomes_remapped : list of str chromosomes remapped; empty if None chromosomes_not_remapped : list of str chromosomes not remapped; empty if None Notes ----- An assembly is also know as a "build." For example: Assembly NCBI36 = Build 36 Assembly GRCh37 = Build 37 Assembly GRCh38 = Build 38 See https://www.ncbi.nlm.nih.gov/assembly for more information about assemblies and remapping. References ---------- ..[1] Ensembl, Assembly Map Endpoint, http://rest.ensembl.org/documentation/info/assembly_map """ from lineage import Lineage l = Lineage() return l.remap_snps(self, target_assembly, complement_bases)
python
def remap_snps(self, target_assembly, complement_bases=True): """ Remap the SNP coordinates of this ``Individual`` from one assembly to another. This method is a wrapper for `remap_snps` in the ``Lineage`` class. This method uses the assembly map endpoint of the Ensembl REST API service to convert SNP coordinates / positions from one assembly to another. After remapping, the coordinates / positions for the ``Individual``'s SNPs will be that of the target assembly. If the SNPs are already mapped relative to the target assembly, remapping will not be performed. Parameters ---------- target_assembly : {'NCBI36', 'GRCh37', 'GRCh38', 36, 37, 38} assembly to remap to complement_bases : bool complement bases when remapping SNPs to the minus strand Returns ------- chromosomes_remapped : list of str chromosomes remapped; empty if None chromosomes_not_remapped : list of str chromosomes not remapped; empty if None Notes ----- An assembly is also know as a "build." For example: Assembly NCBI36 = Build 36 Assembly GRCh37 = Build 37 Assembly GRCh38 = Build 38 See https://www.ncbi.nlm.nih.gov/assembly for more information about assemblies and remapping. References ---------- ..[1] Ensembl, Assembly Map Endpoint, http://rest.ensembl.org/documentation/info/assembly_map """ from lineage import Lineage l = Lineage() return l.remap_snps(self, target_assembly, complement_bases)
[ "def", "remap_snps", "(", "self", ",", "target_assembly", ",", "complement_bases", "=", "True", ")", ":", "from", "lineage", "import", "Lineage", "l", "=", "Lineage", "(", ")", "return", "l", ".", "remap_snps", "(", "self", ",", "target_assembly", ",", "complement_bases", ")" ]
Remap the SNP coordinates of this ``Individual`` from one assembly to another.

        This method is a wrapper for `remap_snps` in the ``Lineage`` class.

        This method uses the assembly map endpoint of the Ensembl REST API service to convert SNP
        coordinates / positions from one assembly to another. After remapping, the coordinates /
        positions for the ``Individual``'s SNPs will be that of the target assembly.

        If the SNPs are already mapped relative to the target assembly, remapping will not be performed.

        Parameters
        ----------
        target_assembly : {'NCBI36', 'GRCh37', 'GRCh38', 36, 37, 38}
            assembly to remap to
        complement_bases : bool
            complement bases when remapping SNPs to the minus strand

        Returns
        -------
        chromosomes_remapped : list of str
            chromosomes remapped; empty if None
        chromosomes_not_remapped : list of str
            chromosomes not remapped; empty if None

        Notes
        -----
        An assembly is also known as a "build." For example:

        Assembly NCBI36 = Build 36
        Assembly GRCh37 = Build 37
        Assembly GRCh38 = Build 38

        See https://www.ncbi.nlm.nih.gov/assembly for more information about assemblies and
        remapping.

        References
        ----------
        ..[1] Ensembl, Assembly Map Endpoint,
          http://rest.ensembl.org/documentation/info/assembly_map
[ "Remap", "the", "SNP", "coordinates", "of", "this", "Individual", "from", "one", "assembly", "to", "another", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/individual.py#L382-L427
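A brief usage sketch; `ind` is again an assumed `Individual` with SNPs already loaded. Note the design choice visible above: the method constructs a fresh `Lineage()` internally, so remapping an `Individual` does not require the caller to hold a `Lineage` instance.

# Remap from the current assembly (e.g. GRCh37) to GRCh38.
remapped, not_remapped = ind.remap_snps("GRCh38")
print("remapped chromosomes:", remapped)
print("not remapped:", not_remapped)

# Integer forms of the target assembly are also accepted, per the docstring.
ind.remap_snps(38)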
5,593
apriha/lineage
src/lineage/individual.py
Individual._set_snps
def _set_snps(self, snps, build=37): """ Set `_snps` and `_build` properties of this ``Individual``. Notes ----- Intended to be used internally to `lineage`. Parameters ---------- snps : pandas.DataFrame individual's genetic data normalized for use with `lineage` build : int build of this ``Individual``'s SNPs """ self._snps = snps self._build = build
python
def _set_snps(self, snps, build=37): """ Set `_snps` and `_build` properties of this ``Individual``. Notes ----- Intended to be used internally to `lineage`. Parameters ---------- snps : pandas.DataFrame individual's genetic data normalized for use with `lineage` build : int build of this ``Individual``'s SNPs """ self._snps = snps self._build = build
[ "def", "_set_snps", "(", "self", ",", "snps", ",", "build", "=", "37", ")", ":", "self", ".", "_snps", "=", "snps", "self", ".", "_build", "=", "build" ]
Set `_snps` and `_build` properties of this ``Individual``. Notes ----- Intended to be used internally to `lineage`. Parameters ---------- snps : pandas.DataFrame individual's genetic data normalized for use with `lineage` build : int build of this ``Individual``'s SNPs
[ "Set", "_snps", "and", "_build", "properties", "of", "this", "Individual", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/individual.py#L429-L444
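`_set_snps` is a plain internal setter; the line below only illustrates how remapping code might hand back a normalized frame together with its build, and is an assumption rather than code taken from lineage (`remapped_snps_df` is hypothetical).

# After remapping to GRCh38, store the new frame and record the build.
ind._set_snps(remapped_snps_df, build=38)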
5,594
apriha/lineage
src/lineage/individual.py
Individual._double_single_alleles
def _double_single_alleles(df, chrom): """ Double any single alleles in the specified chromosome. Parameters ---------- df : pandas.DataFrame SNPs chrom : str chromosome of alleles to double Returns ------- df : pandas.DataFrame SNPs with specified chromosome's single alleles doubled """ # find all single alleles of the specified chromosome single_alleles = np.where( (df["chrom"] == chrom) & (df["genotype"].str.len() == 1) )[0] # double those alleles df.ix[single_alleles, "genotype"] = df.ix[single_alleles, "genotype"] * 2 return df
python
def _double_single_alleles(df, chrom): """ Double any single alleles in the specified chromosome. Parameters ---------- df : pandas.DataFrame SNPs chrom : str chromosome of alleles to double Returns ------- df : pandas.DataFrame SNPs with specified chromosome's single alleles doubled """ # find all single alleles of the specified chromosome single_alleles = np.where( (df["chrom"] == chrom) & (df["genotype"].str.len() == 1) )[0] # double those alleles df.ix[single_alleles, "genotype"] = df.ix[single_alleles, "genotype"] * 2 return df
[ "def", "_double_single_alleles", "(", "df", ",", "chrom", ")", ":", "# find all single alleles of the specified chromosome", "single_alleles", "=", "np", ".", "where", "(", "(", "df", "[", "\"chrom\"", "]", "==", "chrom", ")", "&", "(", "df", "[", "\"genotype\"", "]", ".", "str", ".", "len", "(", ")", "==", "1", ")", ")", "[", "0", "]", "# double those alleles", "df", ".", "ix", "[", "single_alleles", ",", "\"genotype\"", "]", "=", "df", ".", "ix", "[", "single_alleles", ",", "\"genotype\"", "]", "*", "2", "return", "df" ]
Double any single alleles in the specified chromosome. Parameters ---------- df : pandas.DataFrame SNPs chrom : str chromosome of alleles to double Returns ------- df : pandas.DataFrame SNPs with specified chromosome's single alleles doubled
[ "Double", "any", "single", "alleles", "in", "the", "specified", "chromosome", "." ]
13106a62a959a80ac26c68d1566422de08aa877b
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/individual.py#L605-L628
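The `df.ix` indexer used above was deprecated in pandas 0.20 and removed in pandas 1.0, so this helper only runs on older pandas versions. An equivalent formulation with a boolean mask and `.loc` (a sketch, not lineage's code) would be:

def double_single_alleles(df, chrom):
    # df is a pandas.DataFrame with 'chrom' and 'genotype' columns.
    # Single-character genotypes on the given chromosome (e.g. X/Y/MT calls).
    mask = (df["chrom"] == chrom) & (df["genotype"].str.len() == 1)
    # Repeat the single allele so "A" becomes "AA", matching the autosomal format.
    df.loc[mask, "genotype"] = df.loc[mask, "genotype"] * 2
    return df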
5,595
tBuLi/symfit
symfit/core/support.py
seperate_symbols
def seperate_symbols(func): """ Seperate the symbols in symbolic function func. Return them in alphabetical order. :param func: scipy symbolic function. :return: (vars, params), a tuple of all variables and parameters, each sorted in alphabetical order. :raises TypeError: only symfit Variable and Parameter are allowed, not sympy Symbols. """ params = [] vars = [] for symbol in func.free_symbols: if not isidentifier(str(symbol)): continue # E.g. Indexed objects might print to A[i, j] if isinstance(symbol, Parameter): params.append(symbol) elif isinstance(symbol, Idx): # Idx objects are not seen as parameters or vars. pass elif isinstance(symbol, (MatrixExpr, Expr)): vars.append(symbol) else: raise TypeError('model contains an unknown symbol type, {}'.format(type(symbol))) for der in func.atoms(sympy.Derivative): # Used by jacobians and hessians, where derivatives are treated as # Variables. This way of writing it is purposefully discriminatory # against derivatives wrt variables, since such derivatives should be # performed explicitly in the case of jacs/hess, and are treated # differently in the case of ODEModels. if der.expr in vars and all(isinstance(s, Parameter) for s in der.variables): vars.append(der) params.sort(key=lambda symbol: symbol.name) vars.sort(key=lambda symbol: symbol.name) return vars, params
python
def seperate_symbols(func): """ Seperate the symbols in symbolic function func. Return them in alphabetical order. :param func: scipy symbolic function. :return: (vars, params), a tuple of all variables and parameters, each sorted in alphabetical order. :raises TypeError: only symfit Variable and Parameter are allowed, not sympy Symbols. """ params = [] vars = [] for symbol in func.free_symbols: if not isidentifier(str(symbol)): continue # E.g. Indexed objects might print to A[i, j] if isinstance(symbol, Parameter): params.append(symbol) elif isinstance(symbol, Idx): # Idx objects are not seen as parameters or vars. pass elif isinstance(symbol, (MatrixExpr, Expr)): vars.append(symbol) else: raise TypeError('model contains an unknown symbol type, {}'.format(type(symbol))) for der in func.atoms(sympy.Derivative): # Used by jacobians and hessians, where derivatives are treated as # Variables. This way of writing it is purposefully discriminatory # against derivatives wrt variables, since such derivatives should be # performed explicitly in the case of jacs/hess, and are treated # differently in the case of ODEModels. if der.expr in vars and all(isinstance(s, Parameter) for s in der.variables): vars.append(der) params.sort(key=lambda symbol: symbol.name) vars.sort(key=lambda symbol: symbol.name) return vars, params
[ "def", "seperate_symbols", "(", "func", ")", ":", "params", "=", "[", "]", "vars", "=", "[", "]", "for", "symbol", "in", "func", ".", "free_symbols", ":", "if", "not", "isidentifier", "(", "str", "(", "symbol", ")", ")", ":", "continue", "# E.g. Indexed objects might print to A[i, j]", "if", "isinstance", "(", "symbol", ",", "Parameter", ")", ":", "params", ".", "append", "(", "symbol", ")", "elif", "isinstance", "(", "symbol", ",", "Idx", ")", ":", "# Idx objects are not seen as parameters or vars.", "pass", "elif", "isinstance", "(", "symbol", ",", "(", "MatrixExpr", ",", "Expr", ")", ")", ":", "vars", ".", "append", "(", "symbol", ")", "else", ":", "raise", "TypeError", "(", "'model contains an unknown symbol type, {}'", ".", "format", "(", "type", "(", "symbol", ")", ")", ")", "for", "der", "in", "func", ".", "atoms", "(", "sympy", ".", "Derivative", ")", ":", "# Used by jacobians and hessians, where derivatives are treated as", "# Variables. This way of writing it is purposefully discriminatory", "# against derivatives wrt variables, since such derivatives should be", "# performed explicitly in the case of jacs/hess, and are treated", "# differently in the case of ODEModels.", "if", "der", ".", "expr", "in", "vars", "and", "all", "(", "isinstance", "(", "s", ",", "Parameter", ")", "for", "s", "in", "der", ".", "variables", ")", ":", "vars", ".", "append", "(", "der", ")", "params", ".", "sort", "(", "key", "=", "lambda", "symbol", ":", "symbol", ".", "name", ")", "vars", ".", "sort", "(", "key", "=", "lambda", "symbol", ":", "symbol", ".", "name", ")", "return", "vars", ",", "params" ]
Seperate the symbols in symbolic function func. Return them in alphabetical
    order.

    :param func: sympy symbolic function.
    :return: (vars, params), a tuple of all variables and parameters, each
        sorted in alphabetical order.
    :raises TypeError: only symfit Variable and Parameter are allowed, not sympy Symbols.
[ "Seperate", "the", "symbols", "in", "symbolic", "function", "func", ".", "Return", "them", "in", "alphabetical", "order", "." ]
759dd3d1d4270510d651f40b23dd26b1b10eee83
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L69-L106
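A hedged usage sketch; it uses symfit's `parameters`/`variables` convenience constructors and imports the helper from the module path given in the record.

from symfit import parameters, variables
from symfit.core.support import seperate_symbols

a, b = parameters('a, b')
x, y = variables('x, y')

vars_, params_ = seperate_symbols(a * x**2 + b * y)
# vars_   -> [x, y]  (alphabetical order)
# params_ -> [a, b]  (alphabetical order)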
5,596
tBuLi/symfit
symfit/core/support.py
sympy_to_py
def sympy_to_py(func, args): """ Turn a symbolic expression into a Python lambda function, which has the names of the variables and parameters as it's argument names. :param func: sympy expression :param args: variables and parameters in this model :return: lambda function to be used for numerical evaluation of the model. """ # replace the derivatives with printable variables. derivatives = {var: Variable(var.name) for var in args if isinstance(var, sympy.Derivative)} func = func.xreplace(derivatives) args = [derivatives[var] if isinstance(var, sympy.Derivative) else var for var in args] lambdafunc = lambdify(args, func, printer=SymfitNumPyPrinter, dummify=False) # Check if the names of the lambda function are what we expect signature = inspect_sig.signature(lambdafunc) sig_parameters = OrderedDict(signature.parameters) for arg, lambda_arg in zip(args, sig_parameters): if arg.name != lambda_arg: break else: # Lambdifying succesful! return lambdafunc # If we are here (very rare), then one of the lambda arg is still a Dummy. # In this case we will manually handle the naming. lambda_names = sig_parameters.keys() arg_names = [arg.name for arg in args] conversion = dict(zip(arg_names, lambda_names)) # Wrap the lambda such that arg names are translated into the correct dummy # symbol names @wraps(lambdafunc) def wrapped_lambdafunc(*ordered_args, **kwargs): converted_kwargs = {conversion[k]: v for k, v in kwargs.items()} return lambdafunc(*ordered_args, **converted_kwargs) # Update the signature of wrapped_lambdafunc to math our args new_sig_parameters = OrderedDict() for arg_name, dummy_name in conversion.items(): if arg_name == dummy_name: # Already has the correct name new_sig_parameters[arg_name] = sig_parameters[arg_name] else: # Change the dummy inspect.Parameter to the correct name param = sig_parameters[dummy_name] param = param.replace(name=arg_name) new_sig_parameters[arg_name] = param wrapped_lambdafunc.__signature__ = signature.replace( parameters=new_sig_parameters.values() ) return wrapped_lambdafunc
python
def sympy_to_py(func, args): """ Turn a symbolic expression into a Python lambda function, which has the names of the variables and parameters as it's argument names. :param func: sympy expression :param args: variables and parameters in this model :return: lambda function to be used for numerical evaluation of the model. """ # replace the derivatives with printable variables. derivatives = {var: Variable(var.name) for var in args if isinstance(var, sympy.Derivative)} func = func.xreplace(derivatives) args = [derivatives[var] if isinstance(var, sympy.Derivative) else var for var in args] lambdafunc = lambdify(args, func, printer=SymfitNumPyPrinter, dummify=False) # Check if the names of the lambda function are what we expect signature = inspect_sig.signature(lambdafunc) sig_parameters = OrderedDict(signature.parameters) for arg, lambda_arg in zip(args, sig_parameters): if arg.name != lambda_arg: break else: # Lambdifying succesful! return lambdafunc # If we are here (very rare), then one of the lambda arg is still a Dummy. # In this case we will manually handle the naming. lambda_names = sig_parameters.keys() arg_names = [arg.name for arg in args] conversion = dict(zip(arg_names, lambda_names)) # Wrap the lambda such that arg names are translated into the correct dummy # symbol names @wraps(lambdafunc) def wrapped_lambdafunc(*ordered_args, **kwargs): converted_kwargs = {conversion[k]: v for k, v in kwargs.items()} return lambdafunc(*ordered_args, **converted_kwargs) # Update the signature of wrapped_lambdafunc to math our args new_sig_parameters = OrderedDict() for arg_name, dummy_name in conversion.items(): if arg_name == dummy_name: # Already has the correct name new_sig_parameters[arg_name] = sig_parameters[arg_name] else: # Change the dummy inspect.Parameter to the correct name param = sig_parameters[dummy_name] param = param.replace(name=arg_name) new_sig_parameters[arg_name] = param wrapped_lambdafunc.__signature__ = signature.replace( parameters=new_sig_parameters.values() ) return wrapped_lambdafunc
[ "def", "sympy_to_py", "(", "func", ",", "args", ")", ":", "# replace the derivatives with printable variables.", "derivatives", "=", "{", "var", ":", "Variable", "(", "var", ".", "name", ")", "for", "var", "in", "args", "if", "isinstance", "(", "var", ",", "sympy", ".", "Derivative", ")", "}", "func", "=", "func", ".", "xreplace", "(", "derivatives", ")", "args", "=", "[", "derivatives", "[", "var", "]", "if", "isinstance", "(", "var", ",", "sympy", ".", "Derivative", ")", "else", "var", "for", "var", "in", "args", "]", "lambdafunc", "=", "lambdify", "(", "args", ",", "func", ",", "printer", "=", "SymfitNumPyPrinter", ",", "dummify", "=", "False", ")", "# Check if the names of the lambda function are what we expect", "signature", "=", "inspect_sig", ".", "signature", "(", "lambdafunc", ")", "sig_parameters", "=", "OrderedDict", "(", "signature", ".", "parameters", ")", "for", "arg", ",", "lambda_arg", "in", "zip", "(", "args", ",", "sig_parameters", ")", ":", "if", "arg", ".", "name", "!=", "lambda_arg", ":", "break", "else", ":", "# Lambdifying succesful!", "return", "lambdafunc", "# If we are here (very rare), then one of the lambda arg is still a Dummy.", "# In this case we will manually handle the naming.", "lambda_names", "=", "sig_parameters", ".", "keys", "(", ")", "arg_names", "=", "[", "arg", ".", "name", "for", "arg", "in", "args", "]", "conversion", "=", "dict", "(", "zip", "(", "arg_names", ",", "lambda_names", ")", ")", "# Wrap the lambda such that arg names are translated into the correct dummy", "# symbol names", "@", "wraps", "(", "lambdafunc", ")", "def", "wrapped_lambdafunc", "(", "*", "ordered_args", ",", "*", "*", "kwargs", ")", ":", "converted_kwargs", "=", "{", "conversion", "[", "k", "]", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "}", "return", "lambdafunc", "(", "*", "ordered_args", ",", "*", "*", "converted_kwargs", ")", "# Update the signature of wrapped_lambdafunc to math our args", "new_sig_parameters", "=", "OrderedDict", "(", ")", "for", "arg_name", ",", "dummy_name", "in", "conversion", ".", "items", "(", ")", ":", "if", "arg_name", "==", "dummy_name", ":", "# Already has the correct name", "new_sig_parameters", "[", "arg_name", "]", "=", "sig_parameters", "[", "arg_name", "]", "else", ":", "# Change the dummy inspect.Parameter to the correct name", "param", "=", "sig_parameters", "[", "dummy_name", "]", "param", "=", "param", ".", "replace", "(", "name", "=", "arg_name", ")", "new_sig_parameters", "[", "arg_name", "]", "=", "param", "wrapped_lambdafunc", ".", "__signature__", "=", "signature", ".", "replace", "(", "parameters", "=", "new_sig_parameters", ".", "values", "(", ")", ")", "return", "wrapped_lambdafunc" ]
Turn a symbolic expression into a Python lambda function,
    which has the names of the variables and parameters as its argument names.

    :param func: sympy expression
    :param args: variables and parameters in this model
    :return: lambda function to be used for numerical evaluation of the model.
[ "Turn", "a", "symbolic", "expression", "into", "a", "Python", "lambda", "function", "which", "has", "the", "names", "of", "the", "variables", "and", "parameters", "as", "it", "s", "argument", "names", "." ]
759dd3d1d4270510d651f40b23dd26b1b10eee83
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L108-L160
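A minimal sketch of calling `sympy_to_py`, continuing the symfit example above; argument order follows the `args` list, and keyword calls work because the lambdified function keeps the symbol names (`dummify=False`).

import numpy as np
from symfit import parameters, variables
from symfit.core.support import sympy_to_py

a, b = parameters('a, b')
x, = variables('x')

f = sympy_to_py(a * x + b, [x, a, b])
f(np.array([1.0, 2.0]), a=2.0, b=0.5)   # -> array([2.5, 4.5])
f(np.array([1.0, 2.0]), 2.0, 0.5)       # positional form, same result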
5,597
tBuLi/symfit
symfit/core/support.py
sympy_to_scipy
def sympy_to_scipy(func, vars, params): """ Convert a symbolic expression to one scipy digs. Not used by ``symfit`` any more. :param func: sympy expression :param vars: variables :param params: parameters :return: Scipy-style function to be used for numerical evaluation of the model. """ lambda_func = sympy_to_py(func, vars, params) def f(x, p): """ Scipy style function. :param x: list of arrays, NxM :param p: tuple of parameter values. """ x = np.atleast_2d(x) y = [x[i] for i in range(len(x))] if len(x[0]) else [] try: ans = lambda_func(*(y + list(p))) except TypeError: # Possibly this is a constant function in which case it only has Parameters. ans = lambda_func(*list(p))# * np.ones(x_shape) return ans return f
python
def sympy_to_scipy(func, vars, params): """ Convert a symbolic expression to one scipy digs. Not used by ``symfit`` any more. :param func: sympy expression :param vars: variables :param params: parameters :return: Scipy-style function to be used for numerical evaluation of the model. """ lambda_func = sympy_to_py(func, vars, params) def f(x, p): """ Scipy style function. :param x: list of arrays, NxM :param p: tuple of parameter values. """ x = np.atleast_2d(x) y = [x[i] for i in range(len(x))] if len(x[0]) else [] try: ans = lambda_func(*(y + list(p))) except TypeError: # Possibly this is a constant function in which case it only has Parameters. ans = lambda_func(*list(p))# * np.ones(x_shape) return ans return f
[ "def", "sympy_to_scipy", "(", "func", ",", "vars", ",", "params", ")", ":", "lambda_func", "=", "sympy_to_py", "(", "func", ",", "vars", ",", "params", ")", "def", "f", "(", "x", ",", "p", ")", ":", "\"\"\"\n Scipy style function.\n\n :param x: list of arrays, NxM\n :param p: tuple of parameter values.\n \"\"\"", "x", "=", "np", ".", "atleast_2d", "(", "x", ")", "y", "=", "[", "x", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "x", ")", ")", "]", "if", "len", "(", "x", "[", "0", "]", ")", "else", "[", "]", "try", ":", "ans", "=", "lambda_func", "(", "*", "(", "y", "+", "list", "(", "p", ")", ")", ")", "except", "TypeError", ":", "# Possibly this is a constant function in which case it only has Parameters.", "ans", "=", "lambda_func", "(", "*", "list", "(", "p", ")", ")", "# * np.ones(x_shape)", "return", "ans", "return", "f" ]
Convert a symbolic expression to one scipy digs. Not used by ``symfit`` any more. :param func: sympy expression :param vars: variables :param params: parameters :return: Scipy-style function to be used for numerical evaluation of the model.
[ "Convert", "a", "symbolic", "expression", "to", "one", "scipy", "digs", ".", "Not", "used", "by", "symfit", "any", "more", "." ]
759dd3d1d4270510d651f40b23dd26b1b10eee83
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L162-L188
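Two hedged observations on the record above: the docstring itself marks the helper as unused, and the call `sympy_to_py(func, vars, params)` passes three arguments while `sympy_to_py` in the same module takes `(func, args)`, which looks like a leftover from an earlier signature. A hypothetical adaptation to the two-argument form might look like:

import numpy as np
from symfit.core.support import sympy_to_py

def sympy_to_scipy_two_arg(expr, vars, params):
    # Combine variables and parameters into the single args list expected above.
    lambda_func = sympy_to_py(expr, list(vars) + list(params))

    def f(x, p):
        # Scipy-style call: x is an array (or list of arrays), p the parameter values.
        x = np.atleast_2d(x)
        y = [x[i] for i in range(len(x))] if len(x[0]) else []
        return lambda_func(*(y + list(p)))

    return f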
5,598
tBuLi/symfit
symfit/core/support.py
jacobian
def jacobian(expr, symbols): """ Derive a symbolic expr w.r.t. each symbol in symbols. This returns a symbolic jacobian vector. :param expr: A sympy Expr. :param symbols: The symbols w.r.t. which to derive. """ jac = [] for symbol in symbols: # Differentiate to every param f = sympy.diff(expr, symbol) jac.append(f) return jac
python
def jacobian(expr, symbols): """ Derive a symbolic expr w.r.t. each symbol in symbols. This returns a symbolic jacobian vector. :param expr: A sympy Expr. :param symbols: The symbols w.r.t. which to derive. """ jac = [] for symbol in symbols: # Differentiate to every param f = sympy.diff(expr, symbol) jac.append(f) return jac
[ "def", "jacobian", "(", "expr", ",", "symbols", ")", ":", "jac", "=", "[", "]", "for", "symbol", "in", "symbols", ":", "# Differentiate to every param", "f", "=", "sympy", ".", "diff", "(", "expr", ",", "symbol", ")", "jac", ".", "append", "(", "f", ")", "return", "jac" ]
Derive a symbolic expr w.r.t. each symbol in symbols. This returns a symbolic jacobian vector. :param expr: A sympy Expr. :param symbols: The symbols w.r.t. which to derive.
[ "Derive", "a", "symbolic", "expr", "w", ".", "r", ".", "t", ".", "each", "symbol", "in", "symbols", ".", "This", "returns", "a", "symbolic", "jacobian", "vector", "." ]
759dd3d1d4270510d651f40b23dd26b1b10eee83
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L300-L312
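A small worked example of the symbolic Jacobian helper, again using symfit's symbol constructors.

from symfit import parameters, variables
from symfit.core.support import jacobian

a, b = parameters('a, b')
x, = variables('x')

jac = jacobian(a * x**2 + b * x, [a, b])
# jac -> [x**2, x], i.e. d(expr)/da and d(expr)/db, in the order of `symbols`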
5,599
tBuLi/symfit
symfit/core/support.py
name
def name(self): """ Save name which can be used for alphabetic sorting and can be turned into a kwarg. """ base_str = 'd{}{}_'.format(self.derivative_count if self.derivative_count > 1 else '', self.expr) for var, count in self.variable_count: base_str += 'd{}{}'.format(var, count if count > 1 else '') return base_str
python
def name(self): """ Save name which can be used for alphabetic sorting and can be turned into a kwarg. """ base_str = 'd{}{}_'.format(self.derivative_count if self.derivative_count > 1 else '', self.expr) for var, count in self.variable_count: base_str += 'd{}{}'.format(var, count if count > 1 else '') return base_str
[ "def", "name", "(", "self", ")", ":", "base_str", "=", "'d{}{}_'", ".", "format", "(", "self", ".", "derivative_count", "if", "self", ".", "derivative_count", ">", "1", "else", "''", ",", "self", ".", "expr", ")", "for", "var", ",", "count", "in", "self", ".", "variable_count", ":", "base_str", "+=", "'d{}{}'", ".", "format", "(", "var", ",", "count", "if", "count", ">", "1", "else", "''", ")", "return", "base_str" ]
Save name which can be used for alphabetic sorting and can be turned into a kwarg.
[ "Save", "name", "which", "can", "be", "used", "for", "alphabetic", "sorting", "and", "can", "be", "turned", "into", "a", "kwarg", "." ]
759dd3d1d4270510d651f40b23dd26b1b10eee83
https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L433-L442
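This `name` property appears to belong to symfit's Derivative wrapper (the enclosing class is outside this record) and gives derivatives kwarg-safe names. A standalone sketch of the same naming rule, with example values:

def derivative_name(expr_name, variable_count, derivative_count):
    # Mirrors the scheme above: 'd<order><expr>_' followed by 'd<var><order>' per variable.
    base = 'd{}{}_'.format(derivative_count if derivative_count > 1 else '', expr_name)
    for var, count in variable_count:
        base += 'd{}{}'.format(var, count if count > 1 else '')
    return base

derivative_name('y', [('x', 1)], 1)   # -> 'dy_dx'
derivative_name('y', [('x', 2)], 2)   # -> 'd2y_dx2'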