def _get_manifest_data(self): with tempfile.NamedTemporaryFile(delete=True) as tmp: try: self.s3.download_fileobj(self.sitename, self.manifest_file, tmp) tmp.seek(0) data = tmp.read() if data: return data.decode("utf-8").split(",") except Exception: pass return []
Return the list of items in the manifest :return: list
def _yass_vars(self): utc = arrow.utcnow() return { "NAME": __title__, "VERSION": __version__, "URL": __uri__, "GENERATOR": "%s %s" % (__title__, __version__), "YEAR": utc.year }
Global variables
def _get_page_meta(self, page): meta = self._pages_meta.get(page) if not meta: src_file = os.path.join(self.pages_dir, page) with open(src_file) as f: _, _ext = os.path.splitext(src_file) markup = _ext.replace(".", "") _meta, _ = frontmatter.parse(f.read()) meta = self.default_page_meta.copy() meta["meta"].update(self.config.get("site.meta", {})) meta.update(_meta) dest_file, url = self._get_dest_file_and_url(page, meta) meta["url"] = url meta["filepath"] = dest_file if meta.get("markup") is None: meta["markup"] = markup self._pages_meta[page] = meta return meta
Cache the page meta from the frontmatter and assign new keys The cache data will be used to build links or other properties
def _get_page_content(self, page): src_file = os.path.join(self.pages_dir, page) with open(src_file) as f: _meta, content = frontmatter.parse(f.read()) return content
Get the page content without the frontmatter
def _link_to(self, page, text=None, title=None, _class="", id="", alt="", **kwargs): anchor = "" if "#" in page: page, anchor = page.split("#") anchor = "#" + anchor meta = self._get_page_meta(page) return "<a href='{url}' class='{_class}' id='{id}' title=\"{title}\">{text}</a>".format( url=meta.get("url", "/") + anchor, text=text or meta.get("title") or title, title=title or "", _class=_class, id=id )
Build the HTML ``<a href>`` link to a page.
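A minimal usage sketch, assuming a hypothetical page ``about.md`` that resolves to ``/about/`` under pretty URLs with default meta:
>>> html = self._link_to('about.md', text='About us', _class='nav')
>>> # yields: <a href='/about/' class='nav' id='' title="">About us</a>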
def _url_to(self, page): anchor = "" if "#" in page: page, anchor = page.split("#") anchor = "#" + anchor meta = self._get_page_meta(page) return meta.get("url", "/") + anchor
Get the URL of a page, including any ``#anchor`` fragment
def _get_dest_file_and_url(self, filepath, page_meta={}): filename = filepath.split("/")[-1] filepath_base = filepath.replace(filename, "").rstrip("/") slug = page_meta.get("slug") fname = slugify(slug) if slug else filename \ .replace(".html", "") \ .replace(".md", "") \ .replace(".jade", "") if page_meta.get("pretty_url") is False: dest_file = os.path.join(filepath_base, "%s.html" % fname) else: dest_dir = filepath_base if filename not in ["index.html", "index.md", "index.jade"]: dest_dir = os.path.join(filepath_base, fname) dest_file = os.path.join(dest_dir, "index.html") url = "/" + dest_file.replace("index.html", "") return dest_file, url
Return tuple of the file destination and url
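A quick sketch of the mapping, traced from the implementation above (paths are hypothetical):
>>> self._get_dest_file_and_url('blog/post.md')
('blog/post/index.html', '/blog/post/')
>>> self._get_dest_file_and_url('blog/post.md', {'pretty_url': False})
('blog/post.html', '/blog/post.html')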
def build_static(self): if not os.path.isdir(self.build_static_dir): os.makedirs(self.build_static_dir) copy_tree(self.static_dir, self.build_static_dir) if self.webassets_cmd: self.webassets_cmd.build()
Build static files
def build_pages(self): for root, _, files in os.walk(self.pages_dir): base_dir = root.replace(self.pages_dir, "").lstrip("/") if not base_dir.startswith("_"): for f in files: src_file = os.path.join(base_dir, f) self._build_page(src_file)
Iterate over the pages_dir and build the pages
def publish(self, target="S3", sitename=None, purge_files=True): self.build() endpoint = self.config.get("hosting.%s" % target) if target.upper() == "S3": p = publisher.S3Website(sitename=sitename or self.config.get("sitename"), aws_access_key_id=endpoint.get("aws_access_key_id"), aws_secret_access_key=endpoint.get("aws_secret_access_key"), region=endpoint.get("aws_region")) if not p.website_exists: if p.create_website() is True: # Need to give it enough time to create it # Should be a one time thing time.sleep(10) p.create_www_website() p.create_manifest_from_s3_files() if purge_files: exclude_files = endpoint.get("purge_exclude_files", []) p.purge_files(exclude_files=exclude_files) p.upload(self.build_dir) return p.website_endpoint_url
To publish programmatically :param target: Where to publish to, e.g. S3 :param sitename: The site name :param purge_files: if True, it will delete old files :return: the website endpoint URL
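A hedged usage sketch (the ``site`` instance and the site name are assumptions, not part of the source):
>>> endpoint = site.publish(target='S3', sitename='example.com')
>>> # returns the S3 website endpoint URL once the build is uploaded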
def create_refund_transaction(cls, refund_transaction, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_refund_transaction_with_http_info(refund_transaction, **kwargs) else: (data) = cls._create_refund_transaction_with_http_info(refund_transaction, **kwargs) return data
Create RefundTransaction Create a new RefundTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_refund_transaction(refund_transaction, async=True) >>> result = thread.get() :param async bool :param RefundTransaction refund_transaction: Attributes of refundTransaction to create (required) :return: RefundTransaction If the method is called asynchronously, returns the request thread.
def delete_refund_transaction_by_id(cls, refund_transaction_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_refund_transaction_by_id_with_http_info(refund_transaction_id, **kwargs) else: (data) = cls._delete_refund_transaction_by_id_with_http_info(refund_transaction_id, **kwargs) return data
Delete RefundTransaction Delete an instance of RefundTransaction by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_refund_transaction_by_id(refund_transaction_id, async=True) >>> result = thread.get() :param async bool :param str refund_transaction_id: ID of refundTransaction to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_refund_transaction_by_id(cls, refund_transaction_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_refund_transaction_by_id_with_http_info(refund_transaction_id, **kwargs) else: (data) = cls._get_refund_transaction_by_id_with_http_info(refund_transaction_id, **kwargs) return data
Find RefundTransaction Return single instance of RefundTransaction by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_refund_transaction_by_id(refund_transaction_id, async=True) >>> result = thread.get() :param async bool :param str refund_transaction_id: ID of refundTransaction to return (required) :return: RefundTransaction If the method is called asynchronously, returns the request thread.
def list_all_refund_transactions(cls, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_refund_transactions_with_http_info(**kwargs) else: (data) = cls._list_all_refund_transactions_with_http_info(**kwargs) return data
List RefundTransactions Return a list of RefundTransactions This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_refund_transactions(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[RefundTransaction] If the method is called asynchronously, returns the request thread.
def replace_refund_transaction_by_id(cls, refund_transaction_id, refund_transaction, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_refund_transaction_by_id_with_http_info(refund_transaction_id, refund_transaction, **kwargs) else: (data) = cls._replace_refund_transaction_by_id_with_http_info(refund_transaction_id, refund_transaction, **kwargs) return data
Replace RefundTransaction Replace all attributes of RefundTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_refund_transaction_by_id(refund_transaction_id, refund_transaction, async=True) >>> result = thread.get() :param async bool :param str refund_transaction_id: ID of refundTransaction to replace (required) :param RefundTransaction refund_transaction: Attributes of refundTransaction to replace (required) :return: RefundTransaction If the method is called asynchronously, returns the request thread.
def update_refund_transaction_by_id(cls, refund_transaction_id, refund_transaction, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_refund_transaction_by_id_with_http_info(refund_transaction_id, refund_transaction, **kwargs) else: (data) = cls._update_refund_transaction_by_id_with_http_info(refund_transaction_id, refund_transaction, **kwargs) return data
Update RefundTransaction Update attributes of RefundTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_refund_transaction_by_id(refund_transaction_id, refund_transaction, async=True) >>> result = thread.get() :param async bool :param str refund_transaction_id: ID of refundTransaction to update. (required) :param RefundTransaction refund_transaction: Attributes of refundTransaction to update. (required) :return: RefundTransaction If the method is called asynchronously, returns the request thread.
def get_skos_registry(registry): ''' Get the :class:`skosprovider.registry.Registry` attached to this pyramid application. :rtype: :class:`skosprovider.registry.Registry` ''' # Argument might be a config or request regis = getattr(registry, 'registry', None) if regis is None: regis = registry return regis.queryUtility(ISkosRegistry)
Get the :class:`skosprovider.registry.Registry` attached to this pyramid application. :rtype: :class:`skosprovider.registry.Registry`
def sort_matches(matches): '''Sorts a ``list`` of matches best to worst''' multipliers = {'exact':10**5,'fname':10**4,'fuzzy':10**2,'fuzzy_fragment':1} matches = [(multipliers[x.type]*(x.amount if x.amount else 1),x) for x in matches] return [x[1] for x in sorted(matches,reverse=True)]
Sorts a ``list`` of matches best to worst
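A doctest-style sketch using a hypothetical stand-in for match objects (the code above only requires ``type`` and ``amount`` attributes):
>>> from collections import namedtuple
>>> M = namedtuple('M', ['type', 'amount'])  # hypothetical stand-in
>>> [m.type for m in sort_matches([M('fuzzy', 1), M('exact', 1)])]
['exact', 'fuzzy']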
def matches(self,string,fuzzy=90,fname_match=True,fuzzy_fragment=None,guess=False): '''Returns a sorted ``list`` of this :class:`Concept`'s matches against ``string``''' matches = [] for item in self.examples: m = best_match_from_list(string,self.examples[item],fuzzy,fname_match,fuzzy_fragment,guess) if m: match = ConceptMatch(self) match.concept = self match.string = string match.item = item match.examples = m[0] match.type = m[2] match.amount = m[3] matches.append(match) return sort_matches(matches)
Returns a sorted ``list`` of this :class:`Concept`'s matches against ``string``
def set_action(self,concept_name,action_meth): '''helper function to set the ``action`` attr of any :class:`Concept`s in ``self.vocab`` that match ``concept_name`` to ``action_meth``''' for concept in self.vocab: if concept.name == concept_name: concept.action = action_meth
helper function to set the ``action`` attr of any :class:`Concept`s in ``self.vocab`` that match ``concept_name`` to ``action_meth``
def match_all_concepts(self,string): '''Returns sorted list of all :class:`Concept`s matching ``string``''' matches = [] for concept in self.vocab: matches += concept.matches(string,self.fuzzy,self.fname_match,self.fuzzy_fragment,self.guess) return sort_matches(matches)
Returns sorted list of all :class:`Concept`s matching ``string``
def match_concept(self,string): '''Find all matches in this :class:`Bottle` for ``string`` and return the best match''' matches = self.match_all_concepts(string) if len(matches)>0: return matches[0] return None
Find all matches in this :class:`Bottle` for ``string`` and return the best match
def parse_string(self,string,best=False): '''Parses ``string`` trying to match each word to a :class:`Concept`. If ``best``, will only return the top matches''' if isinstance(string,list): items = string else: items = string.split() item_list = [] not_next = False for item in items: if self.negative: if item=='not': not_next = True continue if item[0]=='-': not_next = True item = item[1:] concepts = self.match_all_concepts(item) if len(concepts)>0: if not_next: for concept in concepts: concept.negative = True if best: item_list.append(concepts[0]) else: item_list.append(concepts) else: item_list.append(item) not_next = False return item_list
Parses ``string`` trying to match each word to a :class:`Concept`. If ``best``, will only return the top matches
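A hedged usage sketch (``bottle`` is an assumed, already-configured instance with ``negative`` enabled; the input words are hypothetical):
>>> parsed = bottle.parse_string('find -hidden files', best=True)
>>> # tokens prefixed with '-' (or preceded by 'not') yield matches flagged negative=True;
>>> # tokens matching no :class:`Concept` are kept as plain strings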
def process_string(self,string): '''Searches the string (or list of strings) for an action word (a :class:`Concept` that has an ``action`` attached to it), then calls the appropriate function with a dictionary of the identified words (according to ``vocab``). For examples, see ``demo.py`` ''' item_list = self.parse_string(string) for item in item_list: if len(item)>0 and 'concept' in dir(item[0]) and 'action' in dir(item[0].concept) and item[0].concept.action: item[0].concept.action(item_list)
Searches the string (or list of strings) for an action word (a :class:`Concept` that has an ``action`` attached to it), then calls the appropriate function with a dictionary of the identified words (according to ``vocab``). For examples, see ``demo.py``
def all(self): response = requests.get(self._url, **self._default_request_kwargs) data = self._get_response_data(response) return self._concrete_instance_list(data)
Get all ObjectRocket instances the current client has access to. :returns: A list of :py:class:`bases.BaseInstance` instances. :rtype: list
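A minimal usage sketch (``client`` and the ``name`` attribute are assumptions based on the surrounding API):
>>> for instance in client.instances.all():
...     print(instance.name)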
def create(self, name, plan, zone, service_type='mongodb', instance_type='mongodb_sharded', version='2.4.6'): # Build up request data. url = self._url request_data = { 'name': name, 'service': service_type, 'plan': plan, 'type': instance_type, 'version': version, 'zone': zone } # Call to create an instance. response = requests.post( url, data=json.dumps(request_data), **self._default_request_kwargs ) # Log outcome of instance creation request. if response.status_code == 200: logger.info('Successfully created a new instance with: {}'.format(request_data)) else: logger.info('Failed to create instance with: {}'.format(request_data)) logger.info('Response: [{0}] {1}'.format(response.status_code, response.content)) data = self._get_response_data(response) return self._concrete_instance(data)
Create an ObjectRocket instance. :param str name: The name to give to the new instance. :param int plan: The plan size of the new instance. :param str zone: The zone that the new instance is to exist in. :param str service_type: The type of service that the new instance is to provide. :param str instance_type: The instance type to create. :param str version: The version of the service the new instance is to provide. :returns: A subclass of :py:class:`bases.BaseInstance` for the newly created instance. :rtype: :py:class:`bases.BaseInstance`
def get(self, instance_name): url = self._url + instance_name + '/' response = requests.get(url, **self._default_request_kwargs) data = self._get_response_data(response) return self._concrete_instance(data)
Get an ObjectRocket instance by name. :param str instance_name: The name of the instance to retrieve. :returns: A subclass of :py:class:`bases.BaseInstance`, or None if instance does not exist. :rtype: :py:class:`bases.BaseInstance`
def _concrete_instance(self, instance_doc): if not isinstance(instance_doc, dict): return None # Attempt to instantiate the appropriate class for the given instance document. try: service = instance_doc['service'] cls = self._service_class_map[service] return cls(instance_document=instance_doc, instances=self) # If construction fails, log the exception and return None. except Exception as ex: logger.exception(ex) logger.error( 'Instance construction failed. You probably need to upgrade to a more ' 'recent version of the client. Instance document which generated this ' 'warning: {}'.format(instance_doc) ) return None
Concretize an instance document. :param dict instance_doc: A document describing an instance. Should come from the API. :returns: A subclass of :py:class:`bases.BaseInstance`, or None. :rtype: :py:class:`bases.BaseInstance`
def _concrete_instance_list(self, instance_docs): if not instance_docs: return [] return list( filter(None, [self._concrete_instance(instance_doc=doc) for doc in instance_docs]) )
Concretize a list of instance documents. :param list instance_docs: A list of instance documents. Should come from the API. :returns: A list of :py:class:`bases.BaseInstance`s. :rtype: list
def load_plugin_by_name(name): plugins = list(load(PLUGIN_NAMESPACE)) full_name = "%s.%s" % (PLUGIN_NAMESPACE, name) try: return next(plugin for plugin in plugins if plugin.__name__ == full_name) except StopIteration: raise UnknownPlugin([plugin.__name__.split('.').pop() for plugin in plugins])
Load the plugin with the specified name. >>> plugin = load_plugin_by_name('default') >>> api = dir(plugin) >>> 'build_package' in api True >>> 'get_version' in api True >>> 'set_package_version' in api True >>> 'set_version' in api True
def change_directory(path=None): if path is not None: try: oldpwd = getcwd() logger.debug('changing directory from %s to %s' % (oldpwd, path)) chdir(path) yield finally: chdir(oldpwd) else: yield
Context manager that changes directory and resets it when exiting >>> with change_directory('/tmp'): >>> pass
def empty_directory(path=None): install_dir = tempfile.mkdtemp(dir=path) try: yield install_dir finally: shutil.rmtree(install_dir)
Context manager that creates a temporary directory, and cleans it up when exiting. >>> with empty_directory(): >>> pass
def get(self, name): path = self._get_cluster_storage_path(name) try: with open(path, 'r') as storage: cluster = self.load(storage) # Compatibility with previous version of Node for node in sum(cluster.nodes.values(), []): if not hasattr(node, 'ips'): log.debug("Monkey patching old version of `Node` class: %s", node.name) node.ips = [node.ip_public, node.ip_private] node.preferred_ip = None cluster.storage_file = path return cluster except IOError as ex: raise ClusterNotFound("Error accessing storage file %s: %s" % (path, ex))
Retrieves the cluster with the given name. :param str name: name of the cluster (identifier) :return: :py:class:`elasticluster.cluster.Cluster`
def save_or_update(self, cluster): if not os.path.exists(self.storage_path): os.makedirs(self.storage_path) path = self._get_cluster_storage_path(cluster.name) cluster.storage_file = path with open(path, 'wb') as storage: self.dump(cluster, storage)
Save or update the cluster to persistent state. :param cluster: cluster to save or update :type cluster: :py:class:`elasticluster.cluster.Cluster`
def _get_store_by_name(self, name): for cls in self.storage_type_map.values(): cluster_files = glob.glob( '%s/%s.%s' % (self.storage_path, name, cls.file_ending)) if cluster_files: try: return cls(self.storage_path) except Exception: continue raise ClusterNotFound("No cluster %s was found" % name)
Return an instance of the correct DiskRepository based on the *first* file that matches the standard syntax for repository files
def _format(color, style=''): _color = QColor() _color.setNamedColor(color) _format = QTextCharFormat() _format.setForeground(_color) if 'bold' in style: _format.setFontWeight(QFont.Bold) if 'italic' in style: _format.setFontItalic(True) return _format
Return a QTextCharFormat with the given attributes.
def highlightBlock(self, text): # Do other syntax formatting for expression, nth, format in self.rules: index = expression.indexIn(text, 0) while index >= 0: # We actually want the index of the nth match index = expression.pos(nth) length = len(expression.cap(nth)) self.setFormat(index, length, format) index = expression.indexIn(text, index + length) self.setCurrentBlockState(0) # Do multi-line strings in_multiline = self.match_multiline(text, *self.tri_single) if not in_multiline: in_multiline = self.match_multiline(text, *self.tri_double)
Apply syntax highlighting to the given block of text.
def match_multiline(self, text, delimiter, in_state, style): # If inside triple-single quotes, start at 0 if self.previousBlockState() == in_state: start = 0 add = 0 # Otherwise, look for the delimiter on this line else: start = delimiter.indexIn(text) # Move past this match add = delimiter.matchedLength() # As long as there's a delimiter match on this line... while start >= 0: # Look for the ending delimiter end = delimiter.indexIn(text, start + add) # Ending delimiter on this line? if end >= add: length = end - start + add + delimiter.matchedLength() self.setCurrentBlockState(0) # No; multi-line string else: self.setCurrentBlockState(in_state) length = len(text) - start + add # Apply formatting self.setFormat(start, length, style) # Look for the next match start = delimiter.indexIn(text, start + length) # Return True if still inside a multi-line string, False otherwise if self.currentBlockState() == in_state: return True else: return False
Do highlighting of multi-line strings. ``delimiter`` should be a ``QRegExp`` for triple-single-quotes or triple-double-quotes, and ``in_state`` should be a unique integer to represent the corresponding state changes when inside those strings. Returns True if we're still inside a multi-line string when this function is finished.
def __import_pem(self, key_name, pem_file_path, password): key_import = self.__get_function_or_ex_function('import_key_pair_from_file') pem_file = os.path.expandvars(os.path.expanduser(pem_file_path)) try: pem = paramiko.RSAKey.from_private_key_file(pem_file, password) except SSHException: try: pem = paramiko.DSSKey.from_private_key_file(pem_file, password) except SSHException as e: raise KeypairError('could not import {f}, neither as RSA key nor as DSA key: {e}' .format(f=pem_file_path, e=e)) if not pem: raise KeypairError('could not import {f}'.format(f=pem_file_path)) else: with NamedTemporaryFile('w+t') as f: f.write('{n} {p}'.format(n=pem.get_name(), p=pem.get_base64())) key_import(name=key_name, key_file_path=f.name)
Import PEM certificate with provider :param key_name: name of the key to import :param pem_file_path: path to the pem file :param password: optional password for the pem file
def __get_function_by_pattern(self, pattern): function_names = [name for name in dir(self.driver) if pattern in name] if function_names: name = function_names[0] if len(function_names) > 1: log.warn( "Several functions match pattern `%s`: %r -- using first one!", pattern, function_names) return getattr(self.driver, name) else: # no such function raise AttributeError( "No function name contains `{0}` in class `{1}`" .format(pattern, self.__class__.__name__))
Return the first function of the driver whose name *contains* the string `pattern`. :param pattern: partial function name (e.g. key_pair) :return: the first matching function (e.g. list_key_pairs)
def __get_function_or_ex_function(self, func_name): # try function name as given try: return getattr(self.driver, func_name) except AttributeError: pass # try prefixing name with `ex_` try: return getattr(self.driver, 'ex_' + func_name) except AttributeError: pass # no such function raise AttributeError( "No function named `{0}` or `{1}` in class `{2}`" .format(func_name, 'ex_'+func_name, self.__class__.__name__))
Check if a function (or an 'ex_'-prefixed extended function) exists for a name on a driver, and if it does, return it. :param func_name: name of the function :return: a callable :raise AttributeError: if neither function exists
def __pop_driver_auth_args(**kwargs): if 'username' in kwargs: return [kwargs.pop('username'), kwargs.pop('password')] elif 'access_token' in kwargs: return [kwargs.pop('access_token')] elif 'access_id' in kwargs: return [kwargs.pop('access_id'), kwargs.pop('secret_key')] elif 'service_account_email' in kwargs: return [kwargs.pop('service_account_email'), kwargs.pop('pem_file')] elif 'client_id' in kwargs: return [kwargs.pop('client_id'), kwargs.pop('client_secret')] return None
Try to construct the authentication arguments that should be passed when initializing a driver :param kwargs: options passed to the class :return: a list of credentials, or None if no known auth keys are present
def from_dict(cls, D, is_json=False): '''This factory for :class:`Model` takes either a native Python dictionary or a JSON dictionary/object if ``is_json`` is ``True``. The dictionary passed does not need to contain all of the values that the Model declares. ''' instance = cls() instance.set_data(D, is_json=is_json) return instance
This factory for :class:`Model` takes either a native Python dictionary or a JSON dictionary/object if ``is_json`` is ``True``. The dictionary passed does not need to contain all of the values that the Model declares.
def add_field(self, key, value, field): ''':meth:`add_field` must be used to add a field to an existing instance of Model. This method is required so that serialization of the data is possible. Data on existing fields (defined in the class) can be reassigned without using this method. ''' self._extra[key] = field setattr(self, key, value)
:meth:`add_field` must be used to add a field to an existing instance of Model. This method is required so that serialization of the data is possible. Data on existing fields (defined in the class) can be reassigned without using this method.
def to_dict(self, serial=False): '''A dictionary representing the data of the class is returned. Native Python objects will still exist in this dictionary (for example, a ``datetime`` object will be returned rather than a string) unless ``serial`` is set to True. ''' if serial: return dict((key, self._fields[key].to_serial(getattr(self, key))) for key in list(self._fields.keys()) if hasattr(self, key)) else: return dict((key, getattr(self, key)) for key in list(self._fields.keys()) if hasattr(self, key))
A dictionary representing the data of the class is returned. Native Python objects will still exist in this dictionary (for example, a ``datetime`` object will be returned rather than a string) unless ``serial`` is set to True.
def run(self, loopinfo=None, batch_size=1): logger.info("{}.Starting...".format(self.__class__.__name__)) if loopinfo: while True: for topic in self.topics: self.call_kafka(topic, batch_size) time.sleep(loopinfo.sleep) else: for topic in self.topics: self.call_kafka(topic, batch_size)
Run the consumer over all registered topics. :param loopinfo: if given, poll forever, sleeping ``loopinfo.sleep`` seconds between passes :param batch_size: number of messages to process per Kafka call
def import_from_string(path, slient=True): names = path.split(".") for i in range(len(names), 0, -1): p1 = ".".join(names[0:i]) module = import_module(p1) if module: p2 = ".".join(names[i:]) if p2: return select(module, p2, slient=slient) else: return module name = names[0] names = names[1:] module = get_caller_locals().get(name) if module and names: return select(module, ".".join(names), slient=slient) if module: return module module = get_caller_globals().get(name) if module and names: return select(module, ".".join(names), slient=slient) if module: return module if slient: return None else: raise ImportError("Import {path} failed.".format(path=path))
Dynamically load and return an object from the given dotted object path.
def parse_search_url(url): config = {} url = urlparse.urlparse(url) # Remove query strings. path = url.path[1:] path = path.split('?', 2)[0] if url.scheme in SEARCH_SCHEMES: config["ENGINE"] = SEARCH_SCHEMES[url.scheme] if url.scheme in USES_URL: config["URL"] = urlparse.urlunparse(("http",) + url[1:]) if url.scheme in USES_INDEX: if path.endswith("/"): path = path[:-1] split = path.rsplit("/", 1) if len(split) > 1: path = split[:-1] index = split[-1] else: path = "" index = split[0] config.update({ "URL": urlparse.urlunparse(("http",) + url[1:2] + (path,) + url[3:]), "INDEX_NAME": index, }) if url.scheme in USES_PATH: config.update({ "PATH": path, }) return config
Parses a search URL.
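A hedged example; the contents of ``SEARCH_SCHEMES`` are not shown here, so an 'elasticsearch' scheme registered under ``USES_INDEX`` is an assumption:
>>> conf = parse_search_url('elasticsearch://127.0.0.1:9200/my-index')
>>> conf['INDEX_NAME']
'my-index'
>>> conf['URL']
'http://127.0.0.1:9200'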
def config(name='SEARCH_URL', default='simple://'): config = {} s = env(name, default) if s: config = parse_search_url(s) return config
Returns configured SEARCH dictionary from SEARCH_URL
def _control_nonterminal(nonterm): # type: (Type[Nonterminal]) -> None if not inspect.isclass(nonterm) or not issubclass(nonterm, Nonterminal): raise NotNonterminalException(nonterm)
Check if the nonterminal is valid. :param nonterm: Nonterminal to check. :raise NotNonterminalException: If the object doesn't inherit from Nonterminal class.
def add(self, *nonterminals): # type: (Iterable[Type[Nonterminal]]) -> None for nonterm in nonterminals: if nonterm in self: continue _NonterminalSet._control_nonterminal(nonterm) super().add(nonterm) self._assign_map[nonterm] = set()
Add nonterminals into the set. :param nonterminals: Nonterminals to insert. :raise NotNonterminalException: If the object doesn't inherit from Nonterminal class.
def remove(self, *nonterminals): # type: (Iterable[Type[Nonterminal]]) -> None for nonterm in set(nonterminals): _NonterminalSet._control_nonterminal(nonterm) if nonterm not in self: raise KeyError('Nonterminal ' + nonterm.__name__ + ' is not inside') self._grammar.rules.remove(*self._assign_map[nonterm], _validate=False) del self._assign_map[nonterm] if self._grammar.start is nonterm: del self._grammar.start super().remove(nonterm)
Remove nonterminals from the set. Also removes rules that use these nonterminals, and unsets the grammar's start symbol if it is among the removed nonterminals. :param nonterminals: Nonterminals to remove.
def wash_html_id(dirty): import re if not dirty[0].isalpha(): # make sure the ID starts with a letter dirty = 'i' + dirty non_word = re.compile(r'[^\w]+') return non_word.sub('', dirty)
Strip non-word characters from a given string and ensure it starts with a letter, so that it can be used as an HTML element ID (also with jQuery and in all browsers). :param dirty: the string to wash :returns: the HTML ID ready string
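A doctest-style sketch of the behavior implemented above:
>>> wash_html_id('9-grid layout!')
'i9gridlayout'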
def quote(self, text=None): text = text or re.sub(r'\[quote=.+?\[/quote\]', '', self.text, flags=re.DOTALL ).strip('\n') return f'[quote={self.author.id};{self.id}]{text}[/quote]'
Quote this post. Parameters ---------- text : str Text to quote. Defaults to the whole text of the post. Returns ------- str A NationStates bbCode quote of the post.
async def factbook(self, root): # This lib might have been a mistake, but the line below # definitely isn't. return html.unescape(html.unescape(root.find('FACTBOOK').text))
Region's World Factbook Entry. Returns ------- an :class:`ApiQuery` of str
async def delegate(self, root): nation = root.find('DELEGATE').text if nation == '0': return None return aionationstates.Nation(nation)
Regional World Assembly Delegate. Returns ------- an :class:`ApiQuery` of :class:`Nation` an :class:`ApiQuery` of None If the region has no delegate.
async def founder(self, root): nation = root.find('FOUNDER').text if nation == '0': return None return aionationstates.Nation(nation)
Regional Founder. Returned even if the nation has ceased to exist. Returns ------- an :class:`ApiQuery` of :class:`Nation` an :class:`ApiQuery` of None If the region is Game-Created and doesn't have a founder.
async def officers(self, root): officers = sorted( root.find('OFFICERS'), # I struggle to say what else this tag would be useful for. key=lambda elem: int(elem.find('ORDER').text) ) return [Officer(elem) for elem in officers]
Regional Officers. Does not include the Founder or the Delegate, unless they have additional titles as Officers. In the correct order. Returns ------- an :class:`ApiQuery` of a list of :class:`Officer`
async def messages(self): # Messages may be posted on the RMB while the generator is running. oldest_id_seen = float('inf') for offset in count(step=100): posts_bunch = await self._get_messages(offset=offset) for post in reversed(posts_bunch): if post.id < oldest_id_seen: yield post oldest_id_seen = posts_bunch[0].id if len(posts_bunch) < 100: break
Iterate through RMB posts from newest to oldest. Returns ------- an asynchronous generator that yields :class:`Post`
def accumulate_items(items, reduce_each=False): if not items: return {} accumulated = defaultdict(list) for key, val in items: accumulated[key].append(val) if not reduce_each: return accumulated else: return {k: reduce_value(v, v) for k, v in iteritems(accumulated)}
:return: a dict mapping each key to the list of values accumulated under it; if ``reduce_each``, each list is reduced via ``reduce_value``
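A doctest-style sketch of the accumulation (``reduce_each`` defers to ``reduce_value``, defined further below; outputs assume lists are in ``_reduce_types``):
>>> dict(accumulate_items([('a', 1), ('b', 2), ('a', 3)]))
{'a': [1, 3], 'b': [2]}
>>> accumulate_items([('a', 1), ('b', 2), ('a', 3)], reduce_each=True)
{'a': [1, 3], 'b': 2}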
def _to_key_val_pairs(defs): if isinstance(defs, STRING_TYPES): # Convert 'a' to [('a', None)], or 'a.b.c' to [('a', 'b.c')] return [defs.split('.', 1) if '.' in defs else (defs, None)] else: pairs = [] # Convert collections of strings or lists as above; break dicts into component items pairs.extend(p for s in defs if isinstance(s, STRING_TYPES) for p in _to_key_val_pairs(s)) pairs.extend(p for l in defs if isinstance(l, list) for p in _to_key_val_pairs(l)) pairs.extend(p for d in defs if isinstance(d, dict) for p in iteritems(d)) return pairs
Helper to split strings, lists and dicts into (key, value) tuples for accumulation
def filter_empty(values, default=None): if values is None: return default elif hasattr(values, '__len__') and len(values) == 0: return default elif hasattr(values, '__iter__') and not isinstance(values, _filtered_types): filtered = type(values) if isinstance(values, _filter_types) else list values = filtered( v for v in values if not (v is None or (hasattr(v, '__len__') and len(v) == 0)) ) return default if len(values) == 0 else values return values
Eliminates None or empty items from lists, tuples or sets passed in. If values is None or empty after filtering, the default is returned.
def flatten_items(items, recurse=False): if not items: return items elif not hasattr(items, '__iter__'): return items elif isinstance(items, _flattened_types): return items flattened = [] for item in items: if item and hasattr(item, '__iter__') and not isinstance(item, _flattened_types): flattened.extend(flatten_items(item, True) if recurse else item) else: flattened.append(item) return type(items)(flattened) if isinstance(items, _flatten_types) else flattened
Expands inner lists (tuples, sets, etc.) within items so that each extends its parent. If items is None, empty, or not iterable, it is returned unchanged. If recurse is False, only the first level of items is flattened; otherwise all levels.
def remove_duplicates(items, in_reverse=False, is_unhashable=False): if not items: return items elif isinstance(items, _removed_dup_types): return items elif not hasattr(items, '__iter__') and not hasattr(items, '__getitem__'): return items _items = items if in_reverse: subscriptable = hasattr(items, '__getitem__') _items = items[::-1] if subscriptable else reversed([i for i in items]) is_unhashable &= not isinstance(items, STRING_TYPES) buffer = list() if is_unhashable else set() append = buffer.append if is_unhashable else buffer.add if not isinstance(items, _remove_dup_types): # The fastest case: handles lists (33% of other cases) and generators (25%) unique = [i for i in _items if i not in buffer and not append(i)] elif isinstance(items, tuple): unique = tuple(i for i in _items if i not in buffer and not append(i)) elif isinstance(items, string_types): unique = EMPTY_STR.join(i for i in _items if i not in buffer and not append(i)) else: # Python 3 compliance: for bytearrays, convert integers back to bytes during iteration unique = EMPTY_BIN.join(bytes([i]) for i in _items if i not in buffer and not append(i)) return unique if not in_reverse else unique[::-1]
With maximum performance, iterate over items and return unique ordered values. :param items: an iterable of values: lists, tuples, strings, or generator :param in_reverse: if True, iterate backwards to remove initial duplicates (less performant) :param is_unhashable: if False, use a set to track duplicates; otherwise a list (less performant) :return: a unique ordered list, tuple or string depending on the type of items
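A doctest-style sketch; outputs assume the module-level type tuples (``_remove_dup_types`` etc.) cover the usual builtins:
>>> remove_duplicates([3, 1, 3, 2, 1])
[3, 1, 2]
>>> remove_duplicates('mississippi')
'misp'
>>> remove_duplicates([1, 2, 1, 3], in_reverse=True)
[2, 1, 3]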
def rfind(values, value): if isinstance(values, STRING_TYPES): try: return values.rfind(value) except TypeError: # Python 3 compliance: search for str values in bytearray return values.rfind(type(values)(value, DEFAULT_ENCODING)) else: try: return len(values) - 1 - values[::-1].index(value) except (TypeError, ValueError): return -1
:return: the highest index in values where value is found, or -1
def rindex(values, value): if isinstance(values, STRING_TYPES): try: return values.rindex(value) except TypeError: # Python 3 compliance: search for str values in bytearray return values.rindex(type(values)(value, DEFAULT_ENCODING)) else: return len(values) - 1 - values[::-1].index(value)
:return: the highest index in values where value is found, else raise ValueError
def reduce_value(value, default=EMPTY_STR): if hasattr(value, '__len__'): vlen = len(value) if vlen == 0: return default elif vlen == 1: if isinstance(value, set): return value.pop() elif isinstance(value, _reduce_types): return value[0] return default if value is None else value
:return: a single value from lists, tuples or sets with one item; otherwise, the value itself if not empty or the default if it is.
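A doctest-style sketch, assuming lists are included in ``_reduce_types``:
>>> reduce_value([42])
42
>>> reduce_value([1, 2])
[1, 2]
>>> reduce_value([], default=None) is None
True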
def wrap_value(value, include_empty=False): if value is None: return [None] if include_empty else [] elif hasattr(value, '__len__') and len(value) == 0: return [value] if include_empty else [] elif isinstance(value, _wrap_types): return [value] elif not hasattr(value, '__iter__'): return [value] return value if include_empty else filter_empty(value, [])
:return: the value wrapped in a list unless it is already iterable (and not a dict); if so, empty values will be filtered out by default, and an empty list is returned.
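A doctest-style sketch, assuming lists are not among the ``_wrap_types`` that get wrapped whole:
>>> wrap_value(None)
[]
>>> wrap_value(42)
[42]
>>> wrap_value([1, None, 2])
[1, 2]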
def unit(self, unit): allowed_values = ["cm", "inch", "foot"] # noqa: E501 if unit is not None and unit not in allowed_values: raise ValueError( "Invalid value for `unit` ({0}), must be one of {1}" # noqa: E501 .format(unit, allowed_values) ) self._unit = unit
Sets the unit of this Dimensions. :param unit: The unit of this Dimensions. :type: str
def map_names(lang="en"): cache_name = "map_names.%s.json" % lang data = get_cached("map_names.json", cache_name, params=dict(lang=lang)) return dict([(item["id"], item["name"]) for item in data])
This resource returns a dictionary of the localized map names for the specified language. Only maps with events are listed - if you need a list of all maps, use ``maps.json`` instead. :param lang: The language to query the names for. :return: the response is a dictionary where the key is the map id and the value is the name of the map in the specified language.
def maps(map_id=None, lang="en"): if map_id: cache_name = "maps.%s.%s.json" % (map_id, lang) params = {"map_id": map_id, "lang": lang} else: cache_name = "maps.%s.json" % lang params = {"lang": lang} data = get_cached("maps.json", cache_name, params=params).get("maps") return data.get(str(map_id)) if map_id else data
This resource returns details about maps in the game, including details about floor and translation data on how to translate between world coordinates and map coordinates. :param map_id: Only list this map. :param lang: Show localized texts in the specified language. The response is a dictionary where the key is the map id and the value is a dictionary containing the following properties:
map_name (string) - The map name.
min_level (number) - The minimal level of this map.
max_level (number) - The maximum level of this map.
default_floor (number) - The default floor of this map.
floors (list) - A list of available floors for this map.
region_id (number) - The id of the region this map belongs to.
region_name (string) - The name of the region this map belongs to.
continent_id (number) - The id of the continent this map belongs to.
continent_name (string) - The name of the continent this map belongs to.
map_rect (rect) - The dimensions of the map.
continent_rect (rect) - The dimensions of the map within the continent coordinate system.
If a map_id is given, only the values for that map are returned.
def get_random_value(field): func = get_factory_func(field) if field.default is not None: if callable(field.default): return field.default() return field.default if field.choices: return random.choice(field.choices) return func(field)
Calls the dispatch method (``get_factory_func``) and passes the field obj argument to the callable returned. Returns: random value depending on field type and constraints in the field object
def get_value_based_inclusive_interval(cls, field, max_value=None): if field.max_value is None: field.max_value = max_value or MAX_LENGTH if field.min_value is None: field.min_value = 0 Interval = namedtuple('interval', ['start', 'stop']) return Interval(start=field.min_value, stop=field.max_value)
This is applicable to fields with max_value and min_value as validators. Note: 1. This is different from fields with max_length as a validator 2. This means that the two methods based on value and length are almost the same method but for the max_* attribute that is being checked. Probably need to DRY this out at some point.
def make_string_field_value(cls, field): if field.regex is not None: raise NotImplementedError string_range = cls.get_range(field) return cls.get_random_string(string_range)
String Field has three constraints (apart from anything in the super class) Args: field (StringField): actual string field object from a model declaration Returns: random string value
def _async_recv(self): logging.info("Receive loop started") recbuffer = b"" while not self._stop_event.is_set(): time.sleep(0.01) try: recbuffer = recbuffer + self._socket.recv(1024) data = recbuffer.split(b'\r\n') recbuffer = data.pop() if data: for line in data: self._process_data(line.decode(encoding='UTF-8', errors='ignore')) except BlockingIOError as e: pass logging.info("Receive loop stopped")
No raw bytes should escape from this; all byte encoding and decoding should be handled inside this function
def main(arguments=None): # setup the command-line util settings su = tools( arguments=arguments, docString=__doc__, logLevel="WARNING", options_first=False, projectName="fundmentals" ) arguments, settings, log, dbConn = su.setup() # UNPACK REMAINING CL ARGUMENTS USING `EXEC` TO SETUP THE VARIABLE NAMES # AUTOMATICALLY for arg, val in arguments.iteritems(): if arg[0] == "-": varname = arg.replace("-", "") + "Flag" else: varname = arg.replace("<", "").replace(">", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = '%s'" % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug('%s = %s' % (varname, val,)) if successFlag and successFlag.lower() == "none": successFlag = None if failureFlag and failureFlag.lower() == "none": failureFlag = None directory_script_runner( log=log, pathToScriptDirectory=pathToDirectory, databaseName=databaseName, loginPath=loginPath, successRule=successFlag, failureRule=failureFlag ) return
The main function used when ``directory_script_runner.py`` is run as a single script from the command line, or when installed as a command-line tool
def Eoi(compiler, cont): '''end of parse_state''' return il.If(il.Ge(il.GetItem(il.parse_state, il.Integer(1)), il.Len(il.GetItem(il.parse_state, il.Integer(0)))), cont(TRUE), il.failcont(FALSE))
end of parse_state
def Boi(compiler, cont): '''beginning of parse_state''' return il.If(il.Le(il.GetItem(il.parse_state, il.Integer(1)),0), cont(TRUE), il.failcont(FALSE))
beginning of parse_state
def get_database_table_column_names( dbConn, log, dbTable ): log.debug('starting the ``get_database_table_column_names`` function') sqlQuery = """SELECT * FROM %s LIMIT 1""" \ % (dbTable, ) # ############### >ACTION(S) ################ try: rows = readquery( log=log, sqlQuery=sqlQuery, dbConn=dbConn, ) except Exception as e: log.error( 'could not find column names for dbTable %s - failed with this error: %s ' % (dbTable, str(e))) return -1 columnNames = rows[0].keys() log.debug('completed the ``get_database_table_column_names`` function') return columnNames
get database table column names **Key Arguments:** - ``dbConn`` -- mysql database connection - ``log`` -- logger - ``dbTable`` -- database tablename **Return:** - ``columnNames`` -- table column names **Usage:** To get the column names of a table in a given database: .. code-block:: python from fundamentals.mysql import get_database_table_column_names columnNames = get_database_table_column_names( dbConn=dbConn, log=log, dbTable="test_table" )
def normalize(df, style = 'mean'): if style == 'mean': df_mean,df_std = df.mean(),df.std() return (df-df_mean)/df_std elif style == 'minmax': col_min,col_max = df.min(),df.max() return (df-col_min)/(col_max-col_min) else: return style(df)
Returns a normalized version of a DataFrame or Series Parameters: df - DataFrame or Series The data to normalize style - function or string, default 'mean' The style to use when computing the norms. Takes 'mean' or 'minmax' to do mean or min-max normalization respectively. User-defined functions that take a pandas Series as input and return a normalized pandas Series are also accepted
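A doctest-style sketch of min-max normalization:
>>> import pandas as pd
>>> normalize(pd.Series([1.0, 2.0, 3.0]), style='minmax').tolist()
[0.0, 0.5, 1.0]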
def norms(df, col_names = None,row_names = None,style = 'mean', as_group = False, axis = 0): if col_names is None: if row_names is not None: df = df.loc[row_names,:] else: if row_names is None: df = df.loc[:,col_names] else: df = df.loc[row_names,col_names] if as_group: return normalize(df,style) if axis == 0 or str(axis).startswith('column'): return _pd.concat([col_normalize(df,col_name,style) for col_name in df.columns],axis = 1) elif axis == 1 or str(axis).startswith('row'): return _pd.concat([row_normalize(df,row_name,style) for row_name in df.index]) else: return normalize(df,style)
Returns a normalized version of the input DataFrame Parameters: df - pandas DataFrame The input data to normalize col_names - list or string, default None The column(s) to use when computing the norms row_names - list or string, default None The row(s) to use when computing the norms style - function or string, default 'mean' The style to use when computing the norms. Takes 'mean' or 'minmax' to do mean or min-max normalization respectively. User-defined functions that take a pandas Series as input and return a normalized pandas Series are also accepted as_group - bool, default False Whether to normalize across the entire range or by row/column. If true, will normalize by the entire DataFrame axis - int or string, default 0 Which axis to perform the normalization on. Accepts 0 or 'columns' to do normalization by columns, and 1 or 'rows' to do normalization by rows
def set_finished(self): component_name = self.get_component_name() self.log( logging.INFO, "Component [%s] is being marked as finished.", component_name) existing_state = self.__get_state(component_name) assert existing_state == fss.constants.PCS_RUNNING, \ "Can not change to 'finished' state from unsupported " \ "state: (" + str(existing_state) + ")" self.__set_data('count', self.__push_count) self.__set_state(fss.constants.PCS_FINISHED)
This stores the number of items that have been pushed, and transitions the current component to the FINISHED state (which precedes the STOPPED state). The FINISHED state isn't really necessary unless methods/hooks are overridden to depend on it, but the count must be stored at one point so that the next component knows how many items to expect. This is done by default after the loop breaks, but can be manually called sooner, if desired.
def weight_unit(self, weight_unit): allowed_values = ["pound", "kilogram"] # noqa: E501 if weight_unit is not None and weight_unit not in allowed_values: raise ValueError( "Invalid value for `weight_unit` ({0}), must be one of {1}" # noqa: E501 .format(weight_unit, allowed_values) ) self._weight_unit = weight_unit
Sets the weight_unit of this MeasurementSettings. :param weight_unit: The weight_unit of this MeasurementSettings. :type: str
def dimensions_unit(self, dimensions_unit): allowed_values = ["inch", "cm", "foot", "meter"] # noqa: E501 if dimensions_unit is not None and dimensions_unit not in allowed_values: raise ValueError( "Invalid value for `dimensions_unit` ({0}), must be one of {1}" # noqa: E501 .format(dimensions_unit, allowed_values) ) self._dimensions_unit = dimensions_unit
Sets the dimensions_unit of this MeasurementSettings. :param dimensions_unit: The dimensions_unit of this MeasurementSettings. :type: str
def add_result_hook(self, hook: Type["QueryResultHook"]) -> Type["QueryResultHook"]: hook.next_hook = self._query_result_hook self._query_result_hook = hook return hook
Add a query result hook to the chain :param hook: hook to add :return: added hook (same as hook to add)
def already_resolved(self, pattern: QueryTriple) -> bool: if self.sparql_locked or pattern == (None, None, None): return True for resolved_node in self.resolved_nodes: if resolved_node != (None, None, None) and \ (pattern[0] == resolved_node[0] or resolved_node[0] is None) and \ (pattern[1] == resolved_node[1] or resolved_node[1] is None) and\ (pattern[2] == resolved_node[2] or resolved_node[2] is None): return True return False
Determine whether pattern has already been loaded into the cache. The "wild card" - `(None, None, None)` - always counts as resolved. :param pattern: pattern to check :return: True if it is a subset of elements already loaded
def add(self, t: RDFTriple) -> None: if self.chained_hook is not None: self.chained_hook.add(t)
Add a triple as a query result :param t: triple being added
def fix_base(fix_environ): def _is_android(): import os vm_path = os.sep+"system"+os.sep+"bin"+os.sep+"dalvikvm" if os.path.exists(vm_path) or os.path.exists(os.sep+"system"+vm_path): return True try: import android del android # Unused import (imported only for Android detection) return True except ImportError: pass return False def _fix_android_environ(): import os if "LD_LIBRARY_PATH" not in os.environ: os.environ["LD_LIBRARY_PATH"] = "" lib_path = os.pathsep+"/system/lib"+os.pathsep+"/vendor/lib" if sys.python_bits == 64: lib_path = os.pathsep+"/system/lib64"+os.pathsep+"/vendor/lib64" + lib_path os.environ["LD_LIBRARY_PATH"] += lib_path if sys.platform.startswith("linux") and sys.platform != "linux-android": if _is_android(): sys.platform = "linux-android" elif "-" not in sys.platform: sys.platform = "linux" sys.platform_codename = sys.platform if sys.platform_codename == "win32": sys.platform_codename = "win" elif sys.platform_codename == "linux-android": sys.platform_codename = "android" if 'maxsize' in sys.__dict__: if sys.maxsize > 2**32: sys.python_bits = 64 else: sys.python_bits = 32 else: import struct sys.python_bits = 8 * struct.calcsize("P") if sys.python_bits == 32: sys.maxsize = 2147483647 else: sys.maxsize = int("9223372036854775807") if fix_environ and sys.platform == "linux-android": _fix_android_environ()
Activate the base compatibility.
def fix_subprocess(override_debug=False, override_exception=False): import subprocess # Exceptions if subprocess.__dict__.get("SubprocessError") is None: subprocess.SubprocessError = _Internal.SubprocessError if _InternalReferences.UsedCalledProcessError is None: if "CalledProcessError" in subprocess.__dict__: _subprocess_called_process_error(True, subprocess) else: _subprocess_called_process_error(False, subprocess) subprocess.CalledProcessError = _InternalReferences.UsedCalledProcessError def _check_output(*args, **kwargs): if "stdout" in kwargs: raise ValueError("stdout argument not allowed, " "it will be overridden.") process = subprocess.Popen(stdout=subprocess.PIPE, *args, **kwargs) stdout_data, __ = process.communicate() ret_code = process.poll() if ret_code is None: raise RuntimeWarning("The process is not yet terminated.") if ret_code: cmd = kwargs.get("args") if cmd is None: cmd = args[0] raise _InternalReferences.UsedCalledProcessError(returncode=ret_code, cmd=cmd, output=stdout_data) return stdout_data try: subprocess.check_output except AttributeError: subprocess.check_output = _check_output
Activate the subprocess compatibility.
def fix_all(override_debug=False, override_all=False): fix_base(True) fix_builtins(override_debug) fix_subprocess(override_debug, override_all) return True
Activate the full compatibility.
def smart_scrub(df,col_name,error_rate = 0): scrubf = smart_scrubf(df,col_name,error_rate) scrubb = smart_scrubb(df,col_name,error_rate) return (scrubf, scrubb)
Scrubs from the front and back of an 'object' column in a DataFrame until the scrub would semantically alter the contents of the column. If only a subset of the elements in the column are scrubbed, then a boolean array indicating which elements have been scrubbed is appended to the dataframe. Returns a tuple of the strings removed from the front and back of the elements df - DataFrame DataFrame to scrub col_name - string Name of column to scrub error_rate - number, default 0 The maximum amount of values this function can ignore while scrubbing, expressed as a fraction of the total amount of rows in the dataframe.
def smart_scrubf(df,col_name,error_rate = 0): scrubbed = "" while True: valcounts = df[col_name].str[:len(scrubbed)+1].value_counts() if not len(valcounts): break if not valcounts[0] >= (1-error_rate) * _utils.rows(df): break scrubbed=valcounts.index[0] if scrubbed == '': return None which = df[col_name].str.startswith(scrubbed) _basics.col_scrubf(df,col_name,which,len(scrubbed),True) if not which.all(): new_col_name = _basics.colname_gen(df,"{}_sf-{}".format(col_name,scrubbed)) df[new_col_name] = which return scrubbed
Scrubs from the front of an 'object' column in a DataFrame until the scrub would semantically alter the contents of the column. If only a subset of the elements in the column are scrubbed, then a boolean array indicating which elements have been scrubbed is appended to the dataframe. Returns the string that was scrubbed df - DataFrame DataFrame to scrub col_name - string Name of column to scrub error_rate - number, default 0 The maximum amount of values this function can ignore while scrubbing, expressed as a fraction of the total amount of rows in the dataframe.
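A doctest-style sketch of prefix scrubbing (assuming ``_basics.col_scrubf`` strips the matched prefix in place, as its use above suggests):
>>> df = pd.DataFrame({'code': ['id_001', 'id_002', 'id_003']})
>>> smart_scrubf(df, 'code')
'id_00'
>>> df['code'].tolist()
['1', '2', '3']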
def smart_scrubb(df,col_name,error_rate = 0): scrubbed = "" while True: valcounts = df[col_name].str[-len(scrubbed)-1:].value_counts() if not len(valcounts): break if not valcounts[0] >= (1-error_rate) * _utils.rows(df): break scrubbed=valcounts.index[0] if scrubbed == '': return None which = df[col_name].str.endswith(scrubbed) _basics.col_scrubb(df,col_name,which,len(scrubbed),True) if not which.all(): new_col_name = _basics.colname_gen(df,"{}_sb-{}".format(col_name,scrubbed)) df[new_col_name] = which return scrubbed
Scrubs from the back of an 'object' column in a DataFrame until the scrub would semantically alter the contents of the column. If only a subset of the elements in the column are scrubbed, then a boolean array indicating which elements have been scrubbed is appended to the dataframe. Returns the string that was scrubbed. df - DataFrame DataFrame to scrub col_name - string Name of column to scrub error_rate - number, default 0 The maximum amount of values this function can ignore while scrubbing, expressed as a fraction of the total amount of rows in the dataframe.
def find_all_for_order(cls, order_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._find_all_for_order_with_http_info(order_id, **kwargs) else: (data) = cls._find_all_for_order_with_http_info(order_id, **kwargs) return data
Find shipping methods for order. Find all shipping methods suitable for an order. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.find_all_for_order(order_id, async=True) >>> result = thread.get() :param async bool :param str order_id: Order ID to get shipping methods for. (required) :return: page[ShippingMethod] If the method is called asynchronously, returns the request thread.
def list_all_shipping_methods(cls, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_shipping_methods_with_http_info(**kwargs) else: (data) = cls._list_all_shipping_methods_with_http_info(**kwargs) return data
List ShippingMethods Return a list of ShippingMethods This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_shipping_methods(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[ShippingMethod] If the method is called asynchronously, returns the request thread.
def extract(cls, obj): span = cls.extract_span(obj) if span: return span.context
Extract span context from the given object :param Any obj: Object to use as context :return: a SpanContext instance extracted from the inner span object or None if no such span context could be found.
def inject(cls, span, obj): obj.metadata['__parent-span__'] = dict() cls.inject_span(span, obj.metadata['__parent-span__'])
Injects the span context into a `carrier` object. :param opentracing.span.SpanContext span: the SpanContext instance :param Any obj: Object to use as context
def extract_tags(cls, obj): return dict(uuid=obj.uuid, entrypoint=obj.__class__.path)
Extract tags from the given object :param Any obj: Object to use as context :return: Tags to add on span :rtype: dict
def _postrun(cls, span, obj, **kwargs): for key, value in ResultSchema().dump(obj.result).items(): if isinstance(value, dict): try: flat_data = cls.filter_keys( cls.fix_additional_fields(value) ) span.set_tag("result.{}".format(key), json.dumps(flat_data)) except Exception: span.set_tag("result.{}".format(key), "N/A") elif isinstance(value, (list, tuple)): try: span.set_tag("result.{}".format(key), json.dumps(value)) except Exception: try: span.set_tag("result.{}".format(key), value) except Exception: span.set_tag("result.{}".format(key), "N/A") else: span.set_tag("result.{}".format(key), value)
Trigger to execute just before closing the span :param opentracing.span.Span span: the SpanContext instance :param Any obj: Object to use as context :param dict kwargs: additional data
def fix_additional_fields(data): result = dict() for key, value in data.items(): if isinstance(value, dict): result.update(KserSpan.to_flat_dict(key, value)) else: result[key] = value return result
Flatten any nested dictionary values in ``data`` into top-level keys so the result can be serialized as span tags