def _get_representation_doc(self): if not self.representation: return 'N/A' fields = {} for name, field in self.representation.fields.items(): fields[name] = self._get_field_doc(field) return fields
Return documentation for the representation of the resource.
def _get_field_doc(self, field): fieldspec = dict() fieldspec['type'] = field.__class__.__name__ fieldspec['required'] = field.required fieldspec['validators'] = [{validator.__class__.__name__: validator.__dict__} for validator in field.validators] return fieldspec
Return documentation for a field in the representation.
def _get_url_doc(self): resolver = get_resolver(None) possibilities = resolver.reverse_dict.getlist(self) urls = [possibility[0] for possibility in possibilities] return urls
Return a list of URLs that map to this resource.
def _get_method_doc(self): ret = {} for method_name in self.methods: method = getattr(self, method_name, None) if method: ret[method_name] = method.__doc__ return ret
Return method documentations.
def clean(df,error_rate = 0): df = df.copy() # Change colnames basics.clean_colnames(df) # Eventually use a more advanced function to clean colnames print('Changed colnames to {}'.format(df.columns)) # Remove extra whitespace obj_col_list = df.select_dtypes(include = 'object').columns for col_name in obj_col_list: df[col_name] = basics.col_strip(df,col_name) print("Stripped extra whitespace from '{}'".format(col_name)) # Coerce columns if possible for col_name in obj_col_list: new_dtype = coerce_col(df,col_name,error_rate) if new_dtype is not None: print("Coerced '{}' to datatype '{}'".format(col_name, new_dtype)) # Scrub columns obj_col_list = df.select_dtypes(include = 'object').columns for col_name in obj_col_list: scrubf, scrubb = smart_scrub(df,col_name,1-error_rate) if scrubf is not None or scrubb is not None: print("Scrubbed '{}' from the front and '{}' from the back of column '{}'" \ .format(scrubf,scrubb,col_name)) # Coerce columns if possible for col_name in obj_col_list: new_dtype = coerce_col(df,col_name,error_rate) if new_dtype is not None: print("Coerced '{}' to datatype '{}'".format(col_name, new_dtype)) return df
Superficially cleans data, i.e. changes simple things about formatting. Parameters: df - DataFrame DataFrame to clean error_rate - float {0 <= error_rate <= 1}, default 0 Maximum fraction of errors/inconsistencies caused explicitly by cleaning, expressed as a proportion of total dataframe rows (0 = 0%, .5 = 50%, etc.) Ex: na values from coercing a column of data to numeric
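A minimal usage sketch (DataFrame contents hypothetical; assumes clean and its basics/coerce helpers are importable from the surrounding module):

>>> import pandas as pd
>>> df = pd.DataFrame({' Price ': ['p10 ', 'p20 ', 'p30 ']})
>>> cleaned = clean(df, error_rate=0)  # prints each rename/strip/scrub/coerce step as it runs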
def create_process(self, command, shell=True, stdout=None, stderr=None, env=None): env = env if env is not None else dict(os.environ) env['DISPLAY'] = self.display return subprocess.Popen(command, shell=shell, stdout=stdout, stderr=stderr, env=env)
Execute a process using subprocess.Popen, setting the backend's DISPLAY environment variable.
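A hedged usage sketch, assuming backend is an instance exposing this method with its display attribute already set (names hypothetical):

>>> proc = backend.create_process('xclock')  # inherits os.environ plus DISPLAY=backend.display
>>> proc.wait()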
def pause(self, instance_id, keep_provisioned=True): try: if self._paused: log.debug("node %s is already paused", instance_id) return self._paused = True post_shutdown_action = 'Stopped' if keep_provisioned else \ 'StoppedDeallocated' result = self._subscription._sms.shutdown_role( service_name=self._cloud_service._name, deployment_name=self._cloud_service._name, role_name=self._qualified_name, post_shutdown_action=post_shutdown_action) self._subscription._wait_result(result) except Exception as exc: log.error("error pausing instance %s: %s", instance_id, exc) raise log.debug('paused instance(instance_id=%s)', instance_id)
shuts down the instance without destroying it. The AbstractCloudProvider class uses 'stop' to refer to destroying a VM, so use 'pause' to mean powering it down while leaving it allocated. :param str instance_id: instance identifier :return: None
def restart(self, instance_id): try: if not self._paused: log.debug("node %s is not paused, can't restart", instance_id) return self._paused = False result = self._subscription._sms.start_role( service_name=self._cloud_service._name, deployment_name=self._cloud_service._name, role_name=instance_id) self._subscription._wait_result(result) except Exception as exc: log.error('error restarting instance %s: %s', instance_id, exc) raise log.debug('restarted instance(instance_id=%s)', instance_id)
restarts a paused instance. :param str instance_id: instance identifier :return: None
def stop_instance(self, instance_id): self._restore_from_storage(instance_id) if self._start_failed: raise Exception('stop_instance for node %s: failing due to' ' previous errors.' % instance_id) with self._resource_lock: try: v_m = self._qualified_name_to_vm(instance_id) if not v_m: err = "stop_instance: can't find instance %s" % instance_id log.error(err) raise Exception(err) v_m._cloud_service._stop_vm(v_m) # note: self._n_instances is a derived property, doesn't need # to be updated if self._n_instances == 0: log.debug('last instance deleted, destroying ' 'global resources') self._delete_global_reqs() self._delete_cloud_provider_storage() except Exception as exc: log.error(traceback.format_exc()) log.error("error stopping instance %s: %s", instance_id, exc) raise log.debug('stopped instance %s', instance_id)
Stops the instance gracefully. :param str instance_id: instance identifier :return: None
def get_ips(self, instance_id): self._restore_from_storage(instance_id) if self._start_failed: raise Exception('get_ips for node %s: failing due to' ' previous errors.' % instance_id) ret = list() v_m = self._qualified_name_to_vm(instance_id) if not v_m: raise Exception("Can't find instance_id %s" % instance_id) if self._config._use_public_ips: ret.append(v_m._public_ip) else: ret.append("%s:%s" % (v_m._public_ip, v_m._ssh_port)) log.debug('get_ips (instance %s) returning %s', instance_id, ', '.join(ret)) return ret
Retrieves the private and public IP addresses for a given instance. Note: Azure normally provides access to VMs from a shared load-balancer IP, with per-VM mappings of SSH ports, so by default the Azure provider returns strings of the form 'ip:port'. However, 'stock' elasticluster and ansible don't support this, so _use_public_ips uses Azure PublicIPs to expose each VM on the internet with its own IP and the standard SSH port. :return: list (IPs)
def is_instance_running(self, instance_id): self._restore_from_storage(instance_id) if self._start_failed: raise Exception('is_instance_running for node %s: failing due to' ' previous errors.' % instance_id) try: v_m = self._qualified_name_to_vm(instance_id) if not v_m: raise Exception("Can't find instance_id %s" % instance_id) except Exception: log.error(traceback.format_exc()) raise return v_m._power_state == 'Started'
Checks if the instance is up and running. :param str instance_id: instance identifier :return: bool - True if running, False otherwise
def _save_or_update(self): with self._resource_lock: if not self._config or not self._config._storage_path: raise Exception("self._config._storage_path is undefined") if not self._config._base_name: raise Exception("self._config._base_name is undefined") if not os.path.exists(self._config._storage_path): os.makedirs(self._config._storage_path) path = self._get_cloud_provider_storage_path() with open(path, 'wb') as storage: pickle.dump(self._config, storage, pickle.HIGHEST_PROTOCOL) pickle.dump(self._subscriptions, storage, pickle.HIGHEST_PROTOCOL)
Save or update the private state needed by the cloud provider.
def chunked(src, size, count=None, **kw): chunk_iter = chunked_iter(src, size, **kw) if count is None: return list(chunk_iter) else: return list(itertools.islice(chunk_iter, count))
Returns a list of *count* chunks, each with *size* elements, generated from iterable *src*. If *src* is not evenly divisible by *size*, the final chunk will have fewer than *size* elements. Pass the *fill* keyword argument to set a pad value and enable padding; otherwise no padding will take place. >>> chunked(range(10), 3) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] >>> chunked(range(10), 3, fill=None) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]] >>> chunked(range(10), 3, count=2) [[0, 1, 2], [3, 4, 5]] See :func:`chunked_iter` for more info.
def chunked_iter(src, size, **kw): # TODO: add count kwarg? if not is_iterable(src): raise TypeError('expected an iterable') size = int(size) if size <= 0: raise ValueError('expected a positive integer chunk size') do_fill = True try: fill_val = kw.pop('fill') except KeyError: do_fill = False fill_val = None if kw: raise ValueError('got unexpected keyword arguments: %r' % kw.keys()) if not src: return postprocess = lambda chk: chk if isinstance(src, basestring): postprocess = lambda chk, _sep=type(src)(): _sep.join(chk) src_iter = iter(src) while True: cur_chunk = list(itertools.islice(src_iter, size)) if not cur_chunk: break lc = len(cur_chunk) if lc < size and do_fill: cur_chunk[lc:] = [fill_val] * (size - lc) yield postprocess(cur_chunk) return
Generates *size*-sized chunks from *src* iterable. Unless the optional *fill* keyword argument is provided, iterables not evenly divisible by *size* will have a final chunk that is smaller than *size*. >>> list(chunked_iter(range(10), 3)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] >>> list(chunked_iter(range(10), 3, fill=None)) [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]] Note that ``fill=None`` in fact uses ``None`` as the fill value.
def windowed_iter(src, size): # TODO: lists? (for consistency) tees = itertools.tee(src, size) try: for i, t in enumerate(tees): for _ in xrange(i): next(t) except StopIteration: return izip([]) return izip(*tees)
Returns tuples with length *size* which represent a sliding window over iterable *src*. >>> list(windowed_iter(range(7), 3)) [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)] If the iterable is too short to make a window of length *size*, then no window tuples are returned. >>> list(windowed_iter(range(3), 5)) []
def xfrange(stop, start=None, step=1.0): if not step: raise ValueError('step must be non-zero') if start is None: start, stop = 0.0, stop * 1.0 else: # swap when all args are used stop, start = start * 1.0, stop * 1.0 cur = start while cur < stop: yield cur cur += step
Same as :func:`frange`, but generator-based instead of returning a list. >>> tuple(xfrange(1, 3, step=0.75)) (1.0, 1.75, 2.5) See :func:`frange` for more details.
def frange(stop, start=None, step=1.0): if not step: raise ValueError('step must be non-zero') if start is None: start, stop = 0.0, stop * 1.0 else: # swap when all args are used stop, start = start * 1.0, stop * 1.0 count = int(math.ceil((stop - start) / step)) ret = [None] * count if not ret: return ret ret[0] = start for i in xrange(1, count): ret[i] = ret[i - 1] + step return ret
A :func:`range` clone for float-based ranges. >>> frange(5) [0.0, 1.0, 2.0, 3.0, 4.0] >>> frange(6, step=1.25) [0.0, 1.25, 2.5, 3.75, 5.0] >>> frange(100.5, 101.5, 0.25) [100.5, 100.75, 101.0, 101.25] >>> frange(5, 0) [] >>> frange(5, 0, step=-1.25) [5.0, 3.75, 2.5, 1.25]
def backoff(start, stop, count=None, factor=2.0, jitter=False): if count == 'repeat': raise ValueError("'repeat' supported in backoff_iter, not backoff") return list(backoff_iter(start, stop, count=count, factor=factor, jitter=jitter))
Returns a list of geometrically-increasing floating-point numbers, suitable for usage with `exponential backoff`_. Exactly like :func:`backoff_iter`, but without the ``'repeat'`` option for *count*. See :func:`backoff_iter` for more details. .. _exponential backoff: https://en.wikipedia.org/wiki/Exponential_backoff >>> backoff(1, 10) [1.0, 2.0, 4.0, 8.0, 10.0]
def partition(src, key=None): bucketized = bucketize(src, key) return bucketized.get(True, []), bucketized.get(False, [])
No relation to :meth:`str.partition`, ``partition`` is like :func:`bucketize`, but for added convenience returns a tuple of ``(truthy_values, falsy_values)``. >>> nonempty, empty = partition(['', '', 'hi', '', 'bye']) >>> nonempty ['hi', 'bye'] *key* defaults to :class:`bool`, but can be carefully overridden to use any function that returns either ``True`` or ``False``. >>> import string >>> is_digit = lambda x: x in string.digits >>> decimal_digits, hexletters = partition(string.hexdigits, is_digit) >>> ''.join(decimal_digits), ''.join(hexletters) ('0123456789', 'abcdefABCDEF')
def unique_iter(src, key=None): if not is_iterable(src): raise TypeError('expected an iterable, not %r' % type(src)) if key is None: key_func = lambda x: x elif callable(key): key_func = key elif isinstance(key, basestring): key_func = lambda x: getattr(x, key, x) else: raise TypeError('"key" expected a string or callable, not %r' % key) seen = set() for i in src: k = key_func(i) if k not in seen: seen.add(k) yield i return
Yield unique elements from the iterable, *src*, based on *key*, in the order in which they first appeared in *src*. >>> repetitious = [1, 2, 3] * 10 >>> list(unique_iter(repetitious)) [1, 2, 3] By default, *key* is the object itself, but *key* can either be a callable or, for convenience, a string name of the attribute on which to uniqueify objects, falling back on identity when the attribute is not present. >>> pleasantries = ['hi', 'hello', 'ok', 'bye', 'yes'] >>> list(unique_iter(pleasantries, key=lambda x: len(x))) ['hi', 'hello', 'bye']
def one(src, default=None, key=None): ones = list(itertools.islice(filter(key, src), 2)) return ones[0] if len(ones) == 1 else default
Along the same lines as the builtins :func:`all` and :func:`any`, and similar to :func:`first`, ``one()`` returns the single object in the given iterable *src* that evaluates to ``True``, as determined by callable *key*. If unset, *key* defaults to :class:`bool`. If *src* has zero or more than one object that evaluates to ``True``, *default* is returned; if *default* is not passed, ``None`` is returned. It's like an `XOR`_ over an iterable. >>> one((True, False, False)) True >>> one((True, False, True)) >>> one((0, 0, 'a')) 'a' >>> one((0, False, None)) >>> one((True, True), default=False) False >>> bool(one(('', 1))) True >>> one((10, 20, 30, 42), key=lambda i: i > 40) 42 See `Martín Gaitán's original repo`_ for further use cases. .. _Martín Gaitán's original repo: https://github.com/mgaitan/one .. _XOR: https://en.wikipedia.org/wiki/Exclusive_or
def same(iterable, ref=_UNSET): iterator = iter(iterable) if ref is _UNSET: ref = next(iterator, ref) return all(val == ref for val in iterator)
``same()`` returns ``True`` when all values in *iterable* are equal to one another, or optionally a reference value, *ref*. Similar to :func:`all` and :func:`any` in that it evaluates an iterable and returns a :class:`bool`. ``same()`` returns ``True`` for empty iterables. >>> same([]) True >>> same([1]) True >>> same(['a', 'a', 'a']) True >>> same(range(20)) False >>> same([[], []]) True >>> same([[], []], ref='test') False
def get_path(root, path, default=_UNSET): if isinstance(path, basestring): path = path.split('.') cur = root try: for seg in path: try: cur = cur[seg] except (KeyError, IndexError) as exc: raise PathAccessError(exc, seg, path) except TypeError as exc: # either string index in a list, or a parent that # doesn't support indexing try: seg = int(seg) cur = cur[seg] except (ValueError, KeyError, IndexError, TypeError): if not is_iterable(cur): exc = TypeError('%r object is not indexable' % type(cur).__name__) raise PathAccessError(exc, seg, path) except PathAccessError: if default is _UNSET: raise return default return cur
Retrieve a value from a nested object via a tuple representing the lookup path. >>> root = {'a': {'b': {'c': [[1], [2], [3]]}}} >>> get_path(root, ('a', 'b', 'c', 2, 0)) 3 The path format is intentionally consistent with that of :func:`remap`. One of get_path's chief aims is improved error messaging. EAFP is great, but the error messages are not. For instance, ``root['a']['b']['c'][2][1]`` gives back ``IndexError: list index out of range`` What went out of range where? get_path currently raises ``PathAccessError: could not access 2 from path ('a', 'b', 'c', 2, 1), got error: IndexError('list index out of range',)``, a subclass of IndexError and KeyError. You can also pass a default that covers the entire operation, should the lookup fail at any level. Args: root: The target nesting of dictionaries, lists, or other objects supporting ``__getitem__``. path (tuple): A list of strings and integers to be successively looked up within *root*. default: The value to be returned should any ``PathAccessError`` exceptions be raised.
def unflatten(data, separator='.', replace=True): ''' Expand all compound keys (at any depth) into nested dicts In [13]: d = {'test.test2': {'k1.k2': 'val'}} In [14]: flange.expand(d) Out[14]: {'test.test2': {'k1': {'k2': 'val'}}} :param data: input dict :param separator: separator in compound keys :param replace: if true, remove the compound key. Otherwise the value will exist under the compound and expanded key :return: copy of input dict with expanded keys ''' if not separator: return data return remap({'temp':data}, visit=lambda p, k, v: __expand_keys(k, v, separator, replace))['temp']
Expand all compound keys (at any depth) into nested dicts In [13]: d = {'test.test2': {'k1.k2': 'val'}} In [14]: flange.expand(d) Out[14]: {'test.test2': {'k1': {'k2': 'val'}}} :param data: input dict :param separator: separator in compound keys :param replace: if true, remove the compound key. Otherwise the value will exist under the compound and expanded key :return: copy of input dict with expanded keys
def __query(p, k, v, accepted_keys=None, required_values=None, path=None, exact=True): def as_values_iterable(v): if isinstance(v, dict): return v.values() elif isinstance(v, six.string_types): return [v] else: # assume v is already some iterable type return v if path and path != p: return False if accepted_keys: if isinstance(accepted_keys, six.string_types): accepted_keys = [accepted_keys] if len([akey for akey in accepted_keys if akey == k or (not exact and akey in k)]) == 0: return False if required_values: if isinstance(required_values, six.string_types): required_values = [required_values] # Find all terms in required_values that have a match somewhere in the values of the v dict. If the # list is shorter than required_values then some terms did not match and this v fails the test. if len(required_values) > len([term for term in required_values for nv in as_values_iterable(v) if term == nv or (not exact and term in nv)]): return False return True
Query function given to visit method :param p: visited path in tuple form :param k: visited key :param v: visited value :param accepted_keys: list of keys where one must match k to satisfy query. :param required_values: list of values where one must match v to satisfy query :param path: exact path in tuple form that must match p to satisfy query :param exact: if True, keys and values must match with ==; if False, a containment test is used instead :return: True if all criteria are satisfied, otherwise False
def create_customer_group(cls, customer_group, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_customer_group_with_http_info(customer_group, **kwargs) else: (data) = cls._create_customer_group_with_http_info(customer_group, **kwargs) return data
Create CustomerGroup Create a new CustomerGroup This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_customer_group(customer_group, async=True) >>> result = thread.get() :param async bool :param CustomerGroup customer_group: Attributes of customerGroup to create (required) :return: CustomerGroup If the method is called asynchronously, returns the request thread.
def delete_customer_group_by_id(cls, customer_group_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_customer_group_by_id_with_http_info(customer_group_id, **kwargs) else: (data) = cls._delete_customer_group_by_id_with_http_info(customer_group_id, **kwargs) return data
Delete CustomerGroup Delete an instance of CustomerGroup by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_customer_group_by_id(customer_group_id, async=True) >>> result = thread.get() :param async bool :param str customer_group_id: ID of customerGroup to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_customer_group_by_id(cls, customer_group_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_customer_group_by_id_with_http_info(customer_group_id, **kwargs) else: (data) = cls._get_customer_group_by_id_with_http_info(customer_group_id, **kwargs) return data
Find CustomerGroup Return single instance of CustomerGroup by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_customer_group_by_id(customer_group_id, async=True) >>> result = thread.get() :param async bool :param str customer_group_id: ID of customerGroup to return (required) :return: CustomerGroup If the method is called asynchronously, returns the request thread.
def list_all_customer_groups(cls, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_customer_groups_with_http_info(**kwargs) else: (data) = cls._list_all_customer_groups_with_http_info(**kwargs) return data
List CustomerGroups Return a list of CustomerGroups This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_customer_groups(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[CustomerGroup] If the method is called asynchronously, returns the request thread.
def replace_customer_group_by_id(cls, customer_group_id, customer_group, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_customer_group_by_id_with_http_info(customer_group_id, customer_group, **kwargs) else: (data) = cls._replace_customer_group_by_id_with_http_info(customer_group_id, customer_group, **kwargs) return data
Replace CustomerGroup Replace all attributes of CustomerGroup This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_customer_group_by_id(customer_group_id, customer_group, async=True) >>> result = thread.get() :param async bool :param str customer_group_id: ID of customerGroup to replace (required) :param CustomerGroup customer_group: Attributes of customerGroup to replace (required) :return: CustomerGroup If the method is called asynchronously, returns the request thread.
def update_customer_group_by_id(cls, customer_group_id, customer_group, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_customer_group_by_id_with_http_info(customer_group_id, customer_group, **kwargs) else: (data) = cls._update_customer_group_by_id_with_http_info(customer_group_id, customer_group, **kwargs) return data
Update CustomerGroup Update attributes of CustomerGroup This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_customer_group_by_id(customer_group_id, customer_group, async=True) >>> result = thread.get() :param async bool :param str customer_group_id: ID of customerGroup to update. (required) :param CustomerGroup customer_group: Attributes of customerGroup to update. (required) :return: CustomerGroup If the method is called asynchronously, returns the request thread.
def _connect(self): # check for existing connection with GoogleCloudProvider.__gce_lock: if self._gce: return self._gce flow = OAuth2WebServerFlow(self._client_id, self._client_secret, GCE_SCOPE) # The `Storage` object holds the credentials that your # application needs to authorize access to the user's # data. The name of the credentials file is provided. If the # file does not exist, it is created. This object can only # hold credentials for a single user. It stores the access # privileges for the application, so a user only has to grant # access through the web interface once. storage_path = os.path.join(self._storage_path, self._client_id + '.oauth.dat') storage = Storage(storage_path) credentials = storage.get() if credentials is None or credentials.invalid: args = argparser.parse_args([]) args.noauth_local_webserver = self._noauth_local_webserver # try to start a browser to have the user authenticate with Google # TODO: what kind of exception is raised if the browser # cannot be started? try: credentials = run_flow(flow, storage, flags=args) except Exception: import sys print("Unexpected error: %s" % sys.exc_info()[0]) raise http = httplib2.Http() self._auth_http = credentials.authorize(http) self._gce = build(GCE_API_NAME, GCE_API_VERSION, http=self._auth_http) return self._gce
Connects to the cloud web services. If this is the first authentication, a web browser will be started to authenticate against Google and provide access to elasticluster. :return: A Resource object with methods for interacting with the service.
def _get_image_url(self, image_id): gce = self._connect() filter = "name eq %s" % image_id request = gce.images().list(project=self._project_id, filter=filter) response = self._execute_request(request) response = self._wait_until_done(response) image_url = None if "items" in response: image_url = response["items"][0]["selfLink"] if image_url: return image_url else: raise ImageError("Could not find given image id `%s`" % image_id)
Gets the URL for the specified image. Unfortunately this only works for images uploaded by the user; the images provided by Google will not be found. :param str image_id: image identifier :return: str - API URL of the image
def create_free_shipping(cls, free_shipping, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_free_shipping_with_http_info(free_shipping, **kwargs) else: (data) = cls._create_free_shipping_with_http_info(free_shipping, **kwargs) return data
Create FreeShipping Create a new FreeShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_free_shipping(free_shipping, async=True) >>> result = thread.get() :param async bool :param FreeShipping free_shipping: Attributes of freeShipping to create (required) :return: FreeShipping If the method is called asynchronously, returns the request thread.
def delete_free_shipping_by_id(cls, free_shipping_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) else: (data) = cls._delete_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) return data
Delete FreeShipping Delete an instance of FreeShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_free_shipping_by_id(free_shipping_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_id: ID of freeShipping to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_free_shipping_by_id(cls, free_shipping_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) else: (data) = cls._get_free_shipping_by_id_with_http_info(free_shipping_id, **kwargs) return data
Find FreeShipping Return single instance of FreeShipping by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_free_shipping_by_id(free_shipping_id, async=True) >>> result = thread.get() :param async bool :param str free_shipping_id: ID of freeShipping to return (required) :return: FreeShipping If the method is called asynchronously, returns the request thread.
def list_all_free_shippings(cls, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_free_shippings_with_http_info(**kwargs) else: (data) = cls._list_all_free_shippings_with_http_info(**kwargs) return data
List FreeShippings Return a list of FreeShippings This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_free_shippings(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[FreeShipping] If the method is called asynchronously, returns the request thread.
def replace_free_shipping_by_id(cls, free_shipping_id, free_shipping, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs) else: (data) = cls._replace_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs) return data
Replace FreeShipping Replace all attributes of FreeShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_free_shipping_by_id(free_shipping_id, free_shipping, async=True) >>> result = thread.get() :param async bool :param str free_shipping_id: ID of freeShipping to replace (required) :param FreeShipping free_shipping: Attributes of freeShipping to replace (required) :return: FreeShipping If the method is called asynchronously, returns the request thread.
def update_free_shipping_by_id(cls, free_shipping_id, free_shipping, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs) else: (data) = cls._update_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs) return data
Update FreeShipping Update attributes of FreeShipping This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_free_shipping_by_id(free_shipping_id, free_shipping, async=True) >>> result = thread.get() :param async bool :param str free_shipping_id: ID of freeShipping to update. (required) :param FreeShipping free_shipping: Attributes of freeShipping to update. (required) :return: FreeShipping If the method is called asynchronously, returns the request thread.
def publish(dataset_uri): try: dataset = dtoolcore.DataSet.from_uri(dataset_uri) except dtoolcore.DtoolCoreTypeError: print("Not a dataset: {}".format(dataset_uri)) sys.exit(1) try: access_uri = dataset._storage_broker.http_enable() except AttributeError: print( "Datasets of type '{}' cannot be published using HTTP".format( dataset._storage_broker.key) ) sys.exit(2) return access_uri
Return access URL to HTTP enabled (published) dataset. Exits with error code 1 if the dataset_uri is not a dataset. Exits with error code 2 if the dataset cannot be HTTP enabled.
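A short usage sketch (dataset URI hypothetical):

>>> access_uri = publish('file:///data/my_dataset')
>>> print(access_uri)  # URL at which the published dataset can be fetched over HTTP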
def cli(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( "dataset_uri", help="Dtool dataset URI" ) parser.add_argument( "-q", "--quiet", action="store_true", help="Only return the http URI" ) args = parser.parse_args() access_uri = publish(args.dataset_uri) if args.quiet: print(access_uri) else: print("Dataset accessible at: {}".format(access_uri))
Command line utility to HTTP enable (publish) a dataset.
def execute(self): creator = make_creator(self.params.config, storage_path=self.params.storage) cluster_name = self.params.cluster try: cluster = creator.load_cluster(cluster_name) except (ClusterNotFound, ConfigurationError) as ex: log.error("Listing nodes from cluster %s: %s\n" % (cluster_name, ex)) return from elasticluster.gc3pie_config import create_gc3pie_config_snippet if self.params.append: path = os.path.expanduser(self.params.append) try: fd = open(path, 'a') fd.write(create_gc3pie_config_snippet(cluster)) fd.close() except IOError as ex: log.error("Unable to write configuration to file %s: %s", path, ex) else: print(create_gc3pie_config_snippet(cluster))
Load the cluster and build a GC3Pie configuration snippet.
def write_xml(self, outfile, encoding="UTF-8"): # we add the media namespace if we see any media items if any([key for item in self.items for key in vars(item) if key.startswith('media_') and getattr(item, key)]): self.rss_attrs["xmlns:media"] = "http://search.yahoo.com/mrss/" self.generator = _generator_name super(MediaRSS2, self).write_xml(outfile, encoding)
Write the Media RSS Feed's XML representation to the given file.
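A hedged usage sketch (feed construction elided; instance and file name hypothetical):

>>> with open('feed.xml', 'w') as f:
...     feed.write_xml(f, encoding='UTF-8')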
def _add_attribute(self, name, value, allowed_values=None): if value and value != 'none': if isinstance(value, (int, bool)): value = str(value) if allowed_values and value not in allowed_values: raise TypeError( "Attribute '" + name + "' must be one of " + str( allowed_values) + " but is '" + str(value) + "'") self.element_attrs[name] = value
Add an attribute to the MediaContent element.
def check_complicance(self): # check Media RSS requirement: one of the following elements is # required: media_group | media_content | media_player | media_peerLink # | media_location. We do the check only if any media_... element is # set to allow non media feeds if(any([ma for ma in vars(self) if ma.startswith('media_') and getattr(self, ma)]) and not self.media_group and not self.media_content and not self.media_player and not self.media_peerLink and not self.media_location ): raise AttributeError( "Using media elements requires the specification of at least " "one of the following elements: 'media_group', " "'media_content', 'media_player', 'media_peerLink' or " "'media_location'.") # check Media RSS requirement: if media:player is missing all # media_content elements need to have url attributes. if not self.media_player: if self.media_content: # check if all media_content elements have a URL set if isinstance(self.media_content, list): if any('url' not in mc.element_attrs for mc in self.media_content): raise AttributeError( "MediaRSSItems require a media_player attribute " "if a media_content has no url set.") else: if not self.media_content.element_attrs.get('url'): raise AttributeError( "MediaRSSItems require a media_player attribute " "if a media_content has no url set.") elif self.media_group: # check media groups without player if its media_content # elements have a URL set raise NotImplementedError( "MediaRSSItem: media_group check not implemented yet.")
Check compliance with Media RSS Specification, Version 1.5.1. see http://www.rssboard.org/media-rss Raises AttributeError on error.
def publish_extensions(self, handler): if isinstance(self.media_content, list): [PyRSS2Gen._opt_element(handler, "media:content", mc_element) for mc_element in self.media_content] else: PyRSS2Gen._opt_element(handler, "media:content", self.media_content) if hasattr(self, 'media_title'): PyRSS2Gen._opt_element(handler, "media:title", self.media_title) if hasattr(self, 'media_text'): PyRSS2Gen._opt_element(handler, "media:text", self.media_text)
Publish the Media RSS Feed elements as XML.
def get_conversations(self): cs = self.data["data"] res = [] for c in cs: res.append(Conversation(c)) return res
Returns list of Conversation objects
def _accumulate(iterable, func=(lambda a,b:a+b)): # this was from the itertools documentation 'Return running totals' # accumulate([1,2,3,4,5]) --> 1 3 6 10 15 # accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120 it = iter(iterable) try: total = next(it) except StopIteration: return yield total for element in it: total = func(total, element) yield total
Return running totals
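The running totals from the comments in the code above, restated as doctest-style examples:

>>> list(_accumulate([1, 2, 3, 4, 5]))
[1, 3, 6, 10, 15]
>>> import operator
>>> list(_accumulate([1, 2, 3, 4, 5], operator.mul))
[1, 2, 6, 24, 120]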
def add_methods(methods_to_add): ''' use this to bulk add new methods to Generator ''' for i in methods_to_add: try: Generator.add_method(*i) except Exception as ex: raise Exception('issue adding {} - {}'.format(repr(i), ex))
use this to bulk add new methods to Generator
def run(self): if KSER_METRICS_ENABLED == "yes": from prometheus_client import start_http_server logger.info("Metric.Starting...") start_http_server( int(os.getenv("KSER_METRICS_PORT", 8888)), os.getenv("KSER_METRICS_ADDRESS", "0.0.0.0") ) logger.info("{}.Starting...".format(self.__class__.__name__)) running = True while running: msg = self.client.poll() if msg: # noinspection PyProtectedMember if not msg.error(): self.REGISTRY.run(msg.value().decode('utf-8')) elif msg.error().code() != KafkaError._PARTITION_EOF: logger.error(msg.error()) running = False self.client.close()
Run consumer
def walklevel(path, depth = -1, **kwargs): # if depth is negative, just walk the full tree if depth < 0: for root, dirs, files in os.walk(path, **kwargs): yield root, dirs, files return # path.count works because if a file has a "/" it will show up in the list # as a ":" path = path.rstrip(os.path.sep) num_sep = path.count(os.path.sep) for root, dirs, files in os.walk(path, **kwargs): yield root, dirs, files num_sep_this = root.count(os.path.sep) if num_sep + depth <= num_sep_this: del dirs[:]
It works just like os.walk, but you can pass it a level parameter that indicates how deep the recursion will go. If depth is -1 (or less than 0), the full depth is walked.
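A short usage sketch (path hypothetical):

>>> for root, dirs, files in walklevel('/tmp', depth=1):
...     print(root)  # '/tmp' and its immediate subdirectories; deeper levels are pruned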
def tree_construct(self, *args, **kwargs): l_files = [] d_constructCallback = {} fn_constructCallback = None for k, v in kwargs.items(): if k == 'l_files': l_files = v if k == 'constructCallback': fn_constructCallback = v index = 1 total = len(l_files) for l_series in l_files: str_path = os.path.dirname(l_series[0]) l_series = [ os.path.basename(i) for i in l_series] self.simpleProgress_show(index, total) self.d_inputTree[str_path] = l_series if fn_constructCallback: kwargs['path'] = str_path d_constructCallback = fn_constructCallback(l_series, **kwargs) self.d_inputTreeCallback[str_path] = d_constructCallback self.d_outputTree[str_path] = "" index += 1 return { 'status': True, 'd_constructCalback': d_constructCallback, 'totalNumberOfAllSeries': index }
Processes the <l_files> list of files from the tree_probe() and builds the input/output dictionary structures. Optionally execute a constructCallback function, and return results
def dirsize_get(l_filesWithoutPath, **kwargs): str_path = "" for k,v in kwargs.items(): if k == 'path': str_path = v size = 0 for f in l_filesWithoutPath: str_f = '%s/%s' % (str_path, f) if not os.path.islink(str_f): try: size += os.path.getsize(str_f) except OSError: pass str_size = pftree.sizeof_fmt(size) return { 'status': True, 'diskUsage_raw': size, 'diskUsage_human': str_size }
Sample callback that determines a directory size.
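A hedged example call (file names and path hypothetical); only the shape of the returned dict is fixed by the code above:

>>> d = dirsize_get(['a.dcm', 'b.dcm'], path='/data/study1')
>>> sorted(d.keys())
['diskUsage_human', 'diskUsage_raw', 'status']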
def tree_analysisOutput(self, *args, **kwargs): fn_outputcallback = None for k, v in kwargs.items(): if k == 'outputcallback': fn_outputcallback = v index = 1 total = len(self.d_inputTree.keys()) for path, d_analysis in self.d_outputTree.items(): self.simpleProgress_show(index, total) self.dp.qprint("Processing analysis results in output: %s" % path) d_output = fn_outputcallback((path, d_analysis), **kwargs) index += 1 return { 'status': True }
An optional method for looping over the <outputTree> and calling an outputcallback on the analysis results at each path. Only call this if self.b_persistAnalysisResults is True.
def stats_compute(self, *args, **kwargs): totalElements = 0 totalKeys = 0 totalSize = 0 l_stats = [] d_report = {} for k, v in sorted(self.d_inputTreeCallback.items(), key = lambda kv: (kv[1]['diskUsage_raw']), reverse = self.b_statsReverse): str_report = "files: %5d; raw size: %12d; human size: %8s; %s" % (\ len(self.d_inputTree[k]), self.d_inputTreeCallback[k]['diskUsage_raw'], self.d_inputTreeCallback[k]['diskUsage_human'], k) d_report = { 'files': len(self.d_inputTree[k]), 'diskUsage_raw': self.d_inputTreeCallback[k]['diskUsage_raw'], 'diskUsage_human': self.d_inputTreeCallback[k]['diskUsage_human'], 'path': k } self.dp.qprint(str_report, level = 1) l_stats.append(d_report) totalElements += len(v) totalKeys += 1 totalSize += self.d_inputTreeCallback[k]['diskUsage_raw'] str_totalSize_human = self.sizeof_fmt(totalSize) return { 'status': True, 'dirs': totalKeys, 'files': totalElements, 'totalSize': totalSize, 'totalSize_human': str_totalSize_human, 'l_stats': l_stats, 'runTime': other.toc() }
Loop over the internal dictionary and report the list size at each key (i.e. the number of files), together with per-directory disk usage and running totals.
def inputReadCallback(self, *args, **kwargs): b_status = True filesRead = 0 for k, v in kwargs.items(): if k == 'l_file': l_file = v if k == 'path': str_path = v if len(args): at_data = args[0] str_path = at_data[0] l_file = at_data[1] self.dp.qprint("reading (in path %s):\n%s" % (str_path, self.pp.pformat(l_file)), level = 5) filesRead = len(l_file) if not len(l_file): b_status = False return { 'status': b_status, 'l_file': l_file, 'str_path': str_path, 'filesRead': filesRead }
Test for inputReadCallback. This method does not actually "read" the input files; it simply returns the passed file list back to the caller.
def inputAnalyzeCallback(self, *args, **kwargs): b_status = False filesRead = 0 filesAnalyzed = 0 for k, v in kwargs.items(): if k == 'filesRead': d_DCMRead = v if k == 'path': str_path = v if len(args): at_data = args[0] str_path = at_data[0] d_read = at_data[1] b_status = True self.dp.qprint("analyzing:\n%s" % self.pp.pformat(d_read['l_file']), level = 5) if int(self.f_sleepLength): self.dp.qprint("sleeping for: %f" % self.f_sleepLength, level = 5) time.sleep(self.f_sleepLength) filesAnalyzed = len(d_read['l_file']) return { 'status': b_status, 'filesAnalyzed': filesAnalyzed, 'l_file': d_read['l_file'] }
Test method for inputAnalyzeCallback. This method loops over the passed number of files, and optionally "delays" in each loop to simulate some analysis. The delay length is specified by the '--test <delay>' flag.
def outputSaveCallback(self, at_data, **kwargs): path = at_data[0] d_outputInfo = at_data[1] other.mkdir(self.str_outputDir) filesSaved = 0 other.mkdir(path) if not self.testType: str_outfile = '%s/file-ls.txt' % path else: str_outfile = '%s/file-count.txt' % path with open(str_outfile, 'w') as f: self.dp.qprint("saving: %s" % (str_outfile), level = 5) if not self.testType: f.write('%s\n' % self.pp.pformat(d_outputInfo['l_file'])) else: f.write('%d\n' % d_outputInfo['filesAnalyzed']) filesSaved += 1 return { 'status': True, 'outputFile': str_outfile, 'filesSaved': filesSaved }
Test method for outputSaveCallback. Writes a file in the output tree containing either a listing of the input files or, in test mode, the number of files analyzed.
def check_required_params(self): for param in self.REQUIRED_FIELDS: if param not in self.params: raise ValidationError("Missing parameter: {} for {}".format( param, self.__class__.path )) for child in self.TASKS: for param in child.REQUIRED_FIELDS: if param not in self.params: raise ValidationError( "Missing parameter: {} for {}".format(param, child.path) )
Check if all required parameters are set
def _set_status(self, status, result=None): logger.info( "{}.SetStatus: {}[{}] status update '{}' -> '{}'".format( self.__class__.__name__, self.__class__.path, self.uuid, self.status, status ), extra=dict( kmsg=Message( self.uuid, entrypoint=self.__class__.path, params=self.params ).dump() ) ) return self.set_status(status, result)
update operation status :param str status: New status :param cdumay_result.Result result: Execution result
def _prerun(self): self.check_required_params() self._set_status("RUNNING") logger.debug( "{}.PreRun: {}[{}]: running...".format( self.__class__.__name__, self.__class__.path, self.uuid ), extra=dict( kmsg=Message( self.uuid, entrypoint=self.__class__.path, params=self.params ).dump() ) ) return self.prerun()
To execute before running the message
def _onsuccess(self, result): self._set_status("SUCCESS", result) logger.info( "{}.Success: {}[{}]: {}".format( self.__class__.__name__, self.__class__.path, self.uuid, result ), extra=dict( kmsg=Message( self.uuid, entrypoint=self.__class__.path, params=self.params ).dump(), kresult=ResultSchema().dump(result) if result else dict() ) ) return self.onsuccess(result)
To execute on execution success :param cdumay_result.Result result: Execution result :return: Execution result :rtype: cdumay_result.Result
def _onerror(self, result): self._set_status("FAILED", result) logger.error( "{}.Failed: {}[{}]: {}".format( self.__class__.__name__, self.__class__.path, self.uuid, result ), extra=dict( kmsg=Message( self.uuid, entrypoint=self.__class__.path, params=self.params ).dump(), kresult=ResultSchema().dump(result) if result else dict() ) ) return self.onerror(result)
To execute on execution failure :param cdumay_result.Result result: Execution result :return: Execution result :rtype: cdumay_result.Result
def display(self): print("{}".format(self)) for task in self.tasks: print(" - {}".format(task))
display the operation and its tasks
def next(self, task): uuid = str(task.uuid) for idx, otask in enumerate(self.tasks[:-1]): if otask.uuid == uuid: if self.tasks[idx + 1].status != 'SUCCESS': return self.tasks[idx + 1] else: uuid = self.tasks[idx + 1].uuid
Find the next task :param kser.sequencing.task.Task task: previous task :return: The next task :rtype: kser.sequencing.task.Task or None
def launch_next(self, task=None, result=None): if task: next_task = self.next(task) if next_task: return next_task.send(result=result) else: return self.set_status(task.status, result) elif len(self.tasks) > 0: return self.tasks[0].send(result=result) else: return Result(retcode=1, stderr="Nothing to do, empty operation!")
Launch next task or finish operation :param kser.sequencing.task.Task task: previous task :param cdumay_result.Result result: previous task result :return: Execution result :rtype: cdumay_result.Result
def compute_tasks(self, **kwargs): params = self._prebuild(**kwargs) if not params: params = dict(kwargs) return self._build_tasks(**params)
perform checks and build tasks :return: list of tasks :rtype: list(kser.sequencing.operation.Operation)
def build(self, **kwargs): self.tasks += self.compute_tasks(**kwargs) return self.finalize()
create the operation and associate tasks :param dict kwargs: operation data :return: the controller :rtype: kser.sequencing.controller.OperationController
def serve_dtool_directory(directory, port): os.chdir(directory) server_address = ("localhost", port) httpd = DtoolHTTPServer(server_address, DtoolHTTPRequestHandler) httpd.serve_forever()
Serve the datasets in a directory over HTTP.
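A short usage sketch (directory and port hypothetical); note the call blocks, serving requests until interrupted:

>>> serve_dtool_directory('/data/datasets', 8081)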
def cli(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( "dataset_directory", help="Directory with datasets to be served" ) parser.add_argument( "-p", "--port", type=int, default=8081, help="Port to serve datasets on (default 8081)" ) args = parser.parse_args() if not os.path.isdir(args.dataset_directory): parser.error("Not a directory: {}".format(args.dataset_directory)) serve_dtool_directory(args.dataset_directory, args.port)
Command line utility for serving datasets in a directory over HTTP.
def generate_url(self, suffix): url_base_path = os.path.dirname(self.path) netloc = "{}:{}".format(*self.server.server_address) return urlunparse(( "http", netloc, url_base_path + "/" + suffix, "", "", ""))
Return URL by combining server details with a path suffix.
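For illustration (host, port, and request path hypothetical): if the handler is serving /my_dataset/http_manifest.json on localhost:8081, then generate_url("README.yml") returns "http://localhost:8081/my_dataset/README.yml".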
def generate_item_urls(self): item_urls = {} for i in self.dataset.identifiers: relpath = self.dataset.item_properties(i)["relpath"] url = self.generate_url("data/" + relpath) item_urls[i] = url return item_urls
Return dict with identifier/URL pairs for the dataset items.
def generate_overlay_urls(self): overlays = {} for o in self.dataset.list_overlay_names(): url = self.generate_url(".dtool/overlays/{}.json".format(o)) overlays[o] = url return overlays
Return dict with overlay/URL pairs for the dataset overlays.
def generate_http_manifest(self): base_path = os.path.dirname(self.translate_path(self.path)) self.dataset = dtoolcore.DataSet.from_uri(base_path) admin_metadata_fpath = os.path.join(base_path, ".dtool", "dtool") with open(admin_metadata_fpath) as fh: admin_metadata = json.load(fh) http_manifest = { "admin_metadata": admin_metadata, "manifest_url": self.generate_url(".dtool/manifest.json"), "readme_url": self.generate_url("README.yml"), "overlays": self.generate_overlay_urls(), "item_urls": self.generate_item_urls() } return bytes(json.dumps(http_manifest), "utf-8")
Return http manifest. The http manifest is the resource that defines a dataset as HTTP enabled (published).
def do_GET(self): if self.path.endswith("http_manifest.json"): try: manifest = self.generate_http_manifest() self.send_response(200) self.end_headers() self.wfile.write(manifest) except dtoolcore.DtoolCoreTypeError: self.send_response(400) self.end_headers() else: super(DtoolHTTPRequestHandler, self).do_GET()
Override inherited do_GET method. Include logic for returning a http manifest when the URL ends with "http_manifest.json".
def indent(self, code, level=1): '''python's famous indent''' lines = code.split('\n') lines = tuple(self.indent_space*level + line for line in lines) return '\n'.join(lines)
python's famous indent
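A hedged doctest, assuming indent_space is four spaces (the attribute is set elsewhere on the class; instance name hypothetical):

>>> gen.indent_space = '    '
>>> gen.indent('x = 1\ny = 2')
'    x = 1\n    y = 2'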
def setup_database_connection( pathToYamlFile): import sys import logging import yaml import pymysql as ms # IMPORT THE YAML CONNECTION DICTIONARY try: logging.info( 'importing the yaml database connection dictionary from ' + pathToYamlFile) stream = open(pathToYamlFile, 'r') connDict = yaml.safe_load(stream) except Exception: logging.critical( 'could not load the connect dictionary from ' + pathToYamlFile) sys.exit(1) # ESTABLISH A DB CONNECTION try: logging.info('connecting to the ' + connDict[ 'db'] + ' database on ' + connDict['host']) dbConn = ms.connect( host=connDict['host'], user=connDict['user'], passwd=connDict['password'], db=connDict['db'], use_unicode=True, charset='utf8', local_infile=1, client_flag=ms.constants.CLIENT.MULTI_STATEMENTS, connect_timeout=36000 ) dbConn.autocommit(True) except Exception as e: logging.critical('could not connect to the ' + connDict['db'] + ' database on ' + connDict['host'] + ' : ' + str(e) + '\n') return dbConn
*Start a database connection using settings in a yaml file* Given the location of a YAML dictionary containing database credentials, this function will set up and return the connection. **Key Arguments:** - ``pathToYamlFile`` -- path to the YAML dictionary. **Return:** - ``dbConn`` -- connection to the MySQL database. **Usage:** The settings file should be in this form, with all keyword values set: .. code-block:: yaml db: dryx_unit_testing host: localhost user: unittesting password: utpass And here's how to generate the connection object: .. code-block:: python from fundamentals.mysql import setup_database_connection dbConn = setup_database_connection( pathToYamlFile=pathToMyYamlFile )
def create_order_line_item(cls, order_line_item, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_order_line_item_with_http_info(order_line_item, **kwargs) else: (data) = cls._create_order_line_item_with_http_info(order_line_item, **kwargs) return data
Create OrderLineItem Create a new OrderLineItem This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_order_line_item(order_line_item, async=True) >>> result = thread.get() :param async bool :param OrderLineItem order_line_item: Attributes of orderLineItem to create (required) :return: OrderLineItem If the method is called asynchronously, returns the request thread.
def delete_order_line_item_by_id(cls, order_line_item_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._delete_order_line_item_by_id_with_http_info(order_line_item_id, **kwargs) else: (data) = cls._delete_order_line_item_by_id_with_http_info(order_line_item_id, **kwargs) return data
Delete OrderLineItem Delete an instance of OrderLineItem by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_order_line_item_by_id(order_line_item_id, async=True) >>> result = thread.get() :param async bool :param str order_line_item_id: ID of orderLineItem to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
def get_order_line_item_by_id(cls, order_line_item_id, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._get_order_line_item_by_id_with_http_info(order_line_item_id, **kwargs) else: (data) = cls._get_order_line_item_by_id_with_http_info(order_line_item_id, **kwargs) return data
Find OrderLineItem Return single instance of OrderLineItem by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_order_line_item_by_id(order_line_item_id, async=True) >>> result = thread.get() :param async bool :param str order_line_item_id: ID of orderLineItem to return (required) :return: OrderLineItem If the method is called asynchronously, returns the request thread.
def list_all_order_line_items(cls, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_order_line_items_with_http_info(**kwargs) else: (data) = cls._list_all_order_line_items_with_http_info(**kwargs) return data
List OrderLineItems Return a list of OrderLineItems This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_order_line_items(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[OrderLineItem] If the method is called asynchronously, returns the request thread.
def replace_order_line_item_by_id(cls, order_line_item_id, order_line_item, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_order_line_item_by_id_with_http_info(order_line_item_id, order_line_item, **kwargs) else: (data) = cls._replace_order_line_item_by_id_with_http_info(order_line_item_id, order_line_item, **kwargs) return data
Replace OrderLineItem Replace all attributes of OrderLineItem This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_order_line_item_by_id(order_line_item_id, order_line_item, async=True) >>> result = thread.get() :param async bool :param str order_line_item_id: ID of orderLineItem to replace (required) :param OrderLineItem order_line_item: Attributes of orderLineItem to replace (required) :return: OrderLineItem If the method is called asynchronously, returns the request thread.
def update_order_line_item_by_id(cls, order_line_item_id, order_line_item, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._update_order_line_item_by_id_with_http_info(order_line_item_id, order_line_item, **kwargs) else: (data) = cls._update_order_line_item_by_id_with_http_info(order_line_item_id, order_line_item, **kwargs) return data
Update OrderLineItem Update attributes of OrderLineItem This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.update_order_line_item_by_id(order_line_item_id, order_line_item, async=True) >>> result = thread.get() :param async bool :param str order_line_item_id: ID of orderLineItem to update. (required) :param OrderLineItem order_line_item: Attributes of orderLineItem to update. (required) :return: OrderLineItem If the method is called asynchronously, returns the request thread.
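These generated client methods all share one calling convention; a minimal usage sketch with a placeholder `api` handle and an illustrative ID. Note that `async=True` is a syntax error on Python 3.7+, where `async` became a keyword, so the flag is passed via a dict below:

# Synchronous by default: the deserialized OrderLineItem is returned directly.
item = api.get_order_line_item_by_id('line-item-id')

# Asynchronous: a thread-like handle is returned; fetch the result with .get().
thread = api.get_order_line_item_by_id('line-item-id', **{'async': True})
item = thread.get()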
def is_grammar_generating(grammar, remove=False): # type: (Grammar, bool) -> bool g = ContextFree.remove_nongenerating_nonterminals(grammar, remove) return g.start is not None
Check whether the grammar is generating. A generating grammar generates at least one sentence. :param grammar: Grammar to check. :param remove: True to remove nongenerating symbols from the grammar. :return: True if the grammar is generating, False otherwise.
def remove_useless_symbols(grammar, inplace=False): # type: (Grammar, bool) -> Grammar grammar = ContextFree.remove_nongenerating_nonterminals(grammar, inplace) grammar = ContextFree.remove_unreachable_symbols(grammar, True) return grammar
Remove useless symbols from the grammar. Useless symbols are unreachable or nongenerating ones. :param grammar: Grammar from which to remove the symbols. :param inplace: True if the transformation should be performed in place, False otherwise. False by default. :return: Grammar without useless symbols.
def prepare_for_cyk(grammar, inplace=False): # type: (Grammar, bool) -> Grammar grammar = ContextFree.remove_useless_symbols(grammar, inplace) grammar = ContextFree.remove_rules_with_epsilon(grammar, True) grammar = ContextFree.remove_unit_rules(grammar, True) grammar = ContextFree.remove_useless_symbols(grammar, True) grammar = ContextFree.transform_to_chomsky_normal_form(grammar, True) return grammar
Take common context-free grammar and perform all the necessary steps to use it in the CYK algorithm. Performs following steps: - remove useless symbols - remove rules with epsilon - remove unit rules - remove useless symbols once more (as previous steps could change the grammar) - transform it to Chomsky Normal Form :param grammar: Grammar to transform. :param inplace: True if the operation should be done in place. False by default. :return: Modified grammar.
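A sketch of how the pipeline might be wired up before parsing; the grammar construction and the downstream CYK call are placeholders for whatever the surrounding library provides:

# Assume `grammar` is a context-free Grammar instance built elsewhere.
if ContextFree.is_grammar_generating(grammar):
    cnf = ContextFree.prepare_for_cyk(grammar)
    # `cnf` is now in Chomsky Normal Form and safe to hand to a CYK parser.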
def retrieve_authorization_code(self, redirect_func=None): request_param = { "client_id": self.client_id, "redirect_uri": self.redirect_uri, } if self.scope: request_param['scope'] = self.scope if self._extra_auth_params: request_param.update(self._extra_auth_params) r = requests.get(self.auth_uri, params=request_param, allow_redirects=False) url = r.headers.get('location') if self.local: webbrowser.open_new_tab(url) authorization_code = raw_input("Code: ") if self.validate_code(authorization_code): self.authorization_code = authorization_code else: return redirect_func(url)
Retrieve an authorization code that can later be exchanged for an access token.
def retrieve_token(self):
    if self.authorization_code:
        request_param = {
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "redirect_uri": self.redirect_uri,
            "code": self.authorization_code
        }
        if self._extra_token_params:
            request_param.update(self._extra_token_params)
        content_length = len(urlencode(request_param))
        headers = {
            'Content-Length': str(content_length),
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        r = requests.post(self.token_uri, data=request_param, headers=headers)
        jsondata = json.loads(r.text)
        self.access_token = jsondata
        return self.access_token
    else:
        print("an authorization code is required before getting an access token")
        print("Please call retrieve_authorization_code() beforehand")
Retrieve an access token using the authorization code fetched via the retrieve_authorization_code method.
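A sketch of the intended two-step flow, assuming these methods live on a client object (`OAuth2Client` is a hypothetical name) configured with the attributes the methods read:

# Hypothetical wiring; attribute names mirror those referenced above.
client = OAuth2Client(
    client_id='...', client_secret='...',
    auth_uri='https://provider.example/authorize',
    token_uri='https://provider.example/token',
    redirect_uri='https://app.example/callback',
)
client.local = True                    # open a browser tab and prompt for the code
client.retrieve_authorization_code()   # sets client.authorization_code
token = client.retrieve_token()        # exchanges the code for an access token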
def to_dict(self):
    '''Represents the setup section in form of key-value pairs.

    Returns
    -------
    dict
    '''
    mapping = dict()
    for attr in dir(self):
        if attr.startswith('_'):
            continue
        if not isinstance(getattr(self.__class__, attr), property):
            continue
        try:
            value = getattr(self, attr)
        except AttributeError:
            if attr in self._OPTIONAL_ATTRS:
                continue
            else:
                raise AttributeError(
                    'Required attribute "{0}" does not exist on '
                    'instance of type "{1}".'.format(
                        attr, self.__class__.__name__
                    )
                )
        mapping[attr] = value
    return mapping
Represents the setup section in form of key-value pairs. Returns ------- dict
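A self-contained sketch of the reflection pattern `to_dict` relies on: walk `dir()`, keep only names defined as `property` objects on the class, and skip optional properties that raise AttributeError. The `Example` class is hypothetical:

class Example(object):

    _OPTIONAL_ATTRS = {'nickname'}

    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        return self._name

    @property
    def nickname(self):
        # Optional: raises AttributeError until explicitly assigned.
        return self._nickname

e = Example('ansible')
# Only `name` survives: `nickname` is optional and unset, and everything
# else in dir(e) is filtered out because it is not a property.
print({attr: getattr(e, attr) for attr in dir(e)
       if not attr.startswith('_')
       and isinstance(getattr(type(e), attr), property)
       and hasattr(e, attr)})  # -> {'name': 'ansible'}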
def key_file_private(self):
    '''str: path to the private key used by Ansible to connect to virtual
    machines (by default looks for a file with name
    :attr:`key_name <tmdeploy.config.CloudSection.key_name>` in ``~/.ssh``
    directory)
    '''
    if not hasattr(self, '_key_file_private'):
        self.key_file_private = '~/.ssh/{key}'.format(key=self.key_name)
    return self._key_file_private
str: path to the private key used by Ansible to connect to virtual machines (by default looks for a file with name :attr:`key_name <tmdeploy.config.CloudSection.key_name>` in ``~/.ssh`` directory)
def key_file_public(self):
    '''str: path to the public key that will be uploaded to the cloud
    provider (by default looks for a ``.pub`` file with name
    :attr:`key_name <tmdeploy.config.CloudSection.key_name>` in ``~/.ssh``
    directory)
    '''
    if not hasattr(self, '_key_file_public'):
        self.key_file_public = '~/.ssh/{key}.pub'.format(key=self.key_name)
    return self._key_file_public
str: path to the public key that will be uploaded to the cloud provider (by default looks for a ``.pub`` file with name :attr:`key_name <tmdeploy.config.CloudSection.key_name>` in ``~/.ssh`` directory)
def tm_group(self):
    '''str: TissueMAPS system group (defaults to
    :attr:`tm_user <tmdeploy.config.AnsibleHostVariableSection.tm_user>`)
    '''
    if self._tm_group is None:
        self._tm_group = self.tm_user
    return self._tm_group
str: TissueMAPS system group (defaults to :attr:`tm_user <tmdeploy.config.AnsibleHostVariableSection.tm_user>`)
def db_group(self):
    '''str: database system group (defaults to
    :attr:`db_user <tmdeploy.config.AnsibleHostVariableSection.db_user>`)
    '''
    if self._db_group is None:
        self._db_group = self.db_user
    return self._db_group
str: database system group (defaults to :attr:`db_user <tmdeploy.config.AnsibleHostVariableSection.db_user>`)
def web_group(self):
    '''str: web system group (defaults to
    :attr:`web_user <tmdeploy.config.AnsibleHostVariableSection.web_user>`)
    '''
    if self._web_group is None:
        self._web_group = self.web_user
    return self._web_group
str: web system group (defaults to :attr:`web_user <tmdeploy.config.AnsibleHostVariableSection.web_user>`)
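The three group properties above share one lazy-default idiom; a minimal standalone sketch (class and attribute names are hypothetical):

class HostVars(object):

    def __init__(self, tm_user, tm_group=None):
        self.tm_user = tm_user
        self._tm_group = tm_group

    @property
    def tm_group(self):
        # Fall back to the user name the first time the group is read.
        if self._tm_group is None:
            self._tm_group = self.tm_user
        return self._tm_group

print(HostVars('tissuemaps').tm_group)  # -> 'tissuemaps'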
def mtime(path): if not os.path.exists(path): return -1 stat = os.stat(path) return stat.st_mtime
Get the modification time of a file, or -1 if the file does not exist.
def get_cached(path, cache_name=None, **kwargs): if gw2api.cache_dir and gw2api.cache_time and cache_name is not False: if cache_name is None: cache_name = path cache_file = os.path.join(gw2api.cache_dir, cache_name) if mtime(cache_file) >= time.time() - gw2api.cache_time: with open(cache_file, "r") as fp: return json.load(fp) else: cache_file = None r = gw2api.session.get(gw2api.BASE_URL + path, **kwargs) if not r.ok: try: response = r.json() except ValueError: # pragma: no cover response = None if isinstance(response, dict) and "text" in response: r.reason = response["text"] r.raise_for_status() data = r.json() if cache_file: with open(cache_file, "w") as fp: json.dump(data, fp, indent=2) return data
Request a resource from the API, first checking whether a cached response is available. Returns the parsed JSON data.
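A usage sketch, assuming the module-level `gw2api.cache_dir` and `gw2api.cache_time` settings have been configured; the endpoint path is illustrative:

import gw2api

gw2api.cache_dir = '/tmp/gw2api-cache'  # directory for cached JSON responses
gw2api.cache_time = 3600                # serve cached copies younger than 1 h

# The first call hits the API and writes the cache file; repeated calls
# within the hour read the file instead.
data = get_cached('build.json')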
def encode_item_link(item_id, number=1, skin_id=None, upgrade1=None, upgrade2=None): return encode_chat_link(gw2api.TYPE_ITEM, id=item_id, number=number, skin_id=skin_id, upgrade1=upgrade1, upgrade2=upgrade2)
Encode a chat link for an item (or a stack of items). :param item_id: the id of the item :param number: the number of items in the stack :param skin_id: the id of the skin applied to the item :param upgrade1: the id of the first upgrade component :param upgrade2: the id of the second upgrade component
def encode_coin_link(copper, silver=0, gold=0): return encode_chat_link(gw2api.TYPE_COIN, copper=copper, silver=silver, gold=gold)
Encode a chat link for an amount of coins.
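A short usage sketch of both encoders; the item id is illustrative:

# A stack of 250 of item 19721 (illustrative id), no skin or upgrades.
stack_link = encode_item_link(19721, number=250)

# One gold, 23 silver, 50 copper.
coin_link = encode_coin_link(50, silver=23, gold=1)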
def status(self, status): allowed_values = ["pending", "awaitingRetry", "successful", "failed"] if status is not None and status not in allowed_values: raise ValueError( "Invalid value for `status` ({0}), must be one of {1}" .format(status, allowed_values) ) self._status = status
Sets the status of this StoreCreditPayment. :param status: The status of this StoreCreditPayment. :type: str
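The setter validates against the allowed enum values at assignment time; a short sketch with a placeholder `payment` instance:

payment.status = 'pending'    # accepted
payment.status = None         # accepted: None bypasses the check
payment.status = 'refunded'   # raises ValueError: not one of the allowed values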
def create_store_credit_payment(cls, store_credit_payment, **kwargs): kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._create_store_credit_payment_with_http_info(store_credit_payment, **kwargs) else: (data) = cls._create_store_credit_payment_with_http_info(store_credit_payment, **kwargs) return data
Create StoreCreditPayment Create a new StoreCreditPayment This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.create_store_credit_payment(store_credit_payment, async=True) >>> result = thread.get() :param async bool :param StoreCreditPayment store_credit_payment: Attributes of storeCreditPayment to create (required) :return: StoreCreditPayment If the method is called asynchronously, returns the request thread.