def delete_s3_bucket(client, resource): if dbconfig.get('enable_delete_s3_buckets', NS_AUDITOR_REQUIRED_TAGS, False): client.delete_bucket(Bucket=resource.id) return ActionStatus.SUCCEED, resource.metrics()
Delete an S3 bucket This function will try to delete an S3 bucket Args: client (:obj:`boto3.session.Session.client`): A boto3 client object resource (:obj:`Resource`): The resource object to terminate Returns: `ActionStatus`
def get_time_server(): ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getnetworktimeserver') return salt.utils.mac_utils.parse_return(ret)
Display the currently set network time server. :return: the network time server :rtype: str CLI Example: .. code-block:: bash salt '*' timezone.get_time_server
def to_json(value, **kwargs): if isinstance(value, HasProperties): return value.serialize(**kwargs) try: return json.loads(json.dumps(value)) except TypeError: raise TypeError( "Cannot convert type {} to JSON without calling 'serialize' " "on an instance of Instance Property and registering a custom " "serializer".format(value.__class__.__name__) )
Convert instance to JSON
def _CheckIsFile(self, file_entry): if definitions.FILE_ENTRY_TYPE_FILE not in self._file_entry_types: return False return file_entry.IsFile()
Checks the is_file find specification. Args: file_entry (FileEntry): file entry. Returns: bool: True if the file entry matches the find specification, False if not.
def stop_periodic_snapshots(self): if self._periodic_thread and self._periodic_thread.isAlive(): self._periodic_thread.stop = True self._periodic_thread.join() self._periodic_thread = None
Post a stop signal to the thread that takes the periodic snapshots. The function waits for the thread to terminate which can take some time depending on the configured interval.
def _expected_condition_find_element(self, element): from toolium.pageelements.page_element import PageElement web_element = False try: if isinstance(element, PageElement): element._web_element = None element._find_web_element() web_element = element._web_element elif isinstance(element, tuple): web_element = self.driver_wrapper.driver.find_element(*element) except NoSuchElementException: pass return web_element
Tries to find the element, but does not throw an exception if the element is not found :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found :returns: the web element if it has been found or False :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
def get_project(self, name): uri = '{base}/{project}'.format(base=self.BASE_URI, project=name) resp = self._client.get(uri, model=models.Project) return resp
Retrieves project information by name :param name: The formal project name in string form.
def hook_up(self, router: UrlDispatcher): router.add_get(self.webhook_path, self.check_hook) router.add_post(self.webhook_path, self.receive_events)
Dynamically hooks the right webhook paths
def new_status(self, new_status): allowed_values = ["NEW", "DONE", "REJECTED"] if new_status not in allowed_values: raise ValueError( "Invalid value for `new_status` ({0}), must be one of {1}" .format(new_status, allowed_values) ) self._new_status = new_status
Sets the new_status of this BuildSetStatusChangedEvent. :param new_status: The new_status of this BuildSetStatusChangedEvent. :type: str
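The setter above guards an enum-like field against invalid values. A minimal standalone sketch of the same validation idea (the class and field names here are illustrative, not the generated client's API):

.. code-block:: python

    class BuildSetEvent:
        _ALLOWED = ("NEW", "DONE", "REJECTED")

        def __init__(self):
            self._new_status = None

        @property
        def new_status(self):
            return self._new_status

        @new_status.setter
        def new_status(self, value):
            # Reject anything outside the allowed set, mirroring the setter above.
            if value not in self._ALLOWED:
                raise ValueError(
                    "Invalid value for `new_status` ({0}), must be one of {1}".format(
                        value, self._ALLOWED))
            self._new_status = value

    event = BuildSetEvent()
    event.new_status = "DONE"      # accepted
    # event.new_status = "MAYBE"   # would raise ValueError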
def statusEnquiry(): a = TpPd(pd=0x3) b = MessageType(mesType=0x34) packet = a / b return packet
STATUS ENQUIRY Section 9.3.28
def swap_columns(self, column_name_1, column_name_2, inplace=False): if inplace: self.__is_dirty__ = True with cython_context(): if self._is_vertex_frame(): graph_proxy = self.__graph__.__proxy__.swap_vertex_fields(column_name_1, column_name_2) self.__graph__.__proxy__ = graph_proxy elif self._is_edge_frame(): graph_proxy = self.__graph__.__proxy__.swap_edge_fields(column_name_1, column_name_2) self.__graph__.__proxy__ = graph_proxy return self else: return super(GFrame, self).swap_columns(column_name_1, column_name_2, inplace=inplace)
Swaps the columns with the given names. If inplace == False (default) this operation does not modify the current SFrame, returning a new SFrame. If inplace == True, this operation modifies the current SFrame, returning self. Parameters ---------- column_name_1 : string Name of column to swap column_name_2 : string Name of other column to swap inplace : bool, optional. Defaults to False. Whether the SFrame is modified in place.
def convert(self, destination_units, *, convert_variables=False): if self.units is None and (destination_units is None or destination_units == "None"): return if not wt_units.is_valid_conversion(self.units, destination_units): valid = wt_units.get_valid_conversions(self.units) raise wt_exceptions.UnitsError(valid, destination_units) if convert_variables: for v in self.variables: v.convert(destination_units) self.units = destination_units
Convert axis to destination_units. Parameters ---------- destination_units : string Destination units. convert_variables : boolean (optional) Toggle conversion of stored arrays. Default is False.
def example_lab_to_rgb(): print("=== RGB Example: Lab->RGB ===") lab = LabColor(0.903, 16.296, -2.217) print(lab) rgb = convert_color(lab, sRGBColor) print(rgb) print("=== End Example ===\n")
Conversions to RGB are a little more complex mathematically. There are also several kinds of RGB color spaces. When converting from a device-independent color space to RGB, sRGB is assumed unless otherwise specified with the target_rgb keyword arg.
def check_ups_alarms_present(the_session, the_helper, the_snmp_value): if the_snmp_value != '0': the_helper.add_status(pynag.Plugins.critical) else: the_helper.add_status(pynag.Plugins.ok) the_helper.set_summary("{} active alarms ".format(the_snmp_value))
OID .1.3.6.1.2.1.33.1.6.1.0. MIB excerpt: the present number of active alarm conditions.
def publish(branch, full_force=False): checkout(branch) try: push('--force --set-upstream origin', branch) except ExistingReference: if full_force: push('origin --delete', branch) push('--force --set-upstream origin', branch)
Publish that branch, i.e. push it to origin
def _load(self): if self.dbfile is not None: with open(self.dbfile, 'r') as f: self._db = json.loads(f.read()) else: self._db = {}
Load the database from its ``dbfile`` if it has one
def keyword_hookup(self, noteId, keywords): try: self.cur.execute("DELETE FROM notekeyword WHERE noteid=?", [noteId]) except: self.error("ERROR: cannot unhook previous keywords") for keyword in keywords: keyword = keyword.decode('utf-8') self.fyi(" inserting keyword:", keyword) keywordId = self.con.execute("SELECT keywordId FROM keyword WHERE keyword = ?;", [keyword]).fetchone() try: if keywordId: self.fyi(" (existing keyword with id: %s)" % keywordId) keywordId = keywordId[0] else: self.fyi(" (new keyword)") self.cur.execute("INSERT INTO keyword(keyword) VALUES (?);", [keyword]) keywordId = self.cur.lastrowid self.con.execute("INSERT INTO notekeyword(noteId, keywordID) VALUES(?, ?)", [noteId, keywordId]) except: self.error("error hooking up keyword '%s'" % keyword) self.con.commit()
Unhook any existing keyword cross-linking entries for the note, then hook up the given keywords.
def _get_wcs_request(self, bbox, time_interval, size_x, size_y, maxcc, time_difference, custom_url_params): return WcsRequest(layer=self.data_feature, bbox=bbox, time=time_interval, resx=size_x, resy=size_y, maxcc=maxcc, custom_url_params=custom_url_params, time_difference=time_difference, image_format=self.image_format, data_source=self.data_source, instance_id=self.instance_id)
Returns WCS request.
def get_username(identifier): pattern = re.compile(r'.+@\w+\..+') if pattern.match(identifier): try: user = User.objects.get(email=identifier) except: raise Http404 else: return user.username else: return identifier
Checks if a string is an email address or not; if it is, returns the matching user's username, otherwise returns the identifier unchanged.
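A standalone sketch of the email-versus-username check without the Django lookup (the dict below is a hypothetical stand-in for ``User.objects``):

.. code-block:: python

    import re

    _USERS_BY_EMAIL = {"alice@example.com": "alice"}  # hypothetical stand-in for User.objects

    def get_username(identifier):
        # Same loose pattern as above: something@word.something
        if re.match(r'.+@\w+\..+', identifier):
            try:
                return _USERS_BY_EMAIL[identifier]
            except KeyError:
                raise LookupError("no user with that email")  # the view raises Http404 here
        return identifier

    print(get_username("alice@example.com"))  # -> alice
    print(get_username("bob"))                # -> bob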
def do_set_log_level(self, arg): if arg in ['i', 'v']: _LOGGING.info('Setting log level to %s', arg) if arg == 'i': _LOGGING.setLevel(logging.INFO) _INSTEONPLM_LOGGING.setLevel(logging.INFO) else: _LOGGING.setLevel(logging.DEBUG) _INSTEONPLM_LOGGING.setLevel(logging.DEBUG) else: _LOGGING.error('Log level value error.') self.do_help('set_log_level')
Set the log level. Usage: set_log_level i|v Parameters: log_level: i - info | v - verbose
def build_endpoint_name(cls, name, endpoint_prefix=None): if endpoint_prefix is None: endpoint_prefix = 'api_{}'.format( cls.__name__.replace('Resource', '').lower() ) endpoint_prefix = endpoint_prefix.rstrip('_') return '_'.join([endpoint_prefix, name])
Given a ``name`` & an optional ``endpoint_prefix``, this generates a name for a URL. :param name: The name for the URL (ex. 'detail') :type name: string :param endpoint_prefix: (Optional) A prefix for the URL's name (for resolving). The default is ``None``, which will autocreate a prefix based on the class name. Ex: ``BlogPostResource`` -> ``api_blogpost_list`` :type endpoint_prefix: string :returns: The final name :rtype: string
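A self-contained sketch of the prefix auto-generation, assuming only the class-name convention described above:

.. code-block:: python

    class BlogPostResource:
        @classmethod
        def build_endpoint_name(cls, name, endpoint_prefix=None):
            if endpoint_prefix is None:
                # 'BlogPostResource' -> 'api_blogpost'
                endpoint_prefix = 'api_{}'.format(
                    cls.__name__.replace('Resource', '').lower())
            endpoint_prefix = endpoint_prefix.rstrip('_')
            return '_'.join([endpoint_prefix, name])

    print(BlogPostResource.build_endpoint_name('list'))  # -> api_blogpost_list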
def addToPrePrepares(self, pp: PrePrepare) -> None: key = (pp.viewNo, pp.ppSeqNo) self.prePrepares[key] = pp self.lastPrePrepareSeqNo = pp.ppSeqNo self.last_accepted_pre_prepare_time = pp.ppTime self.dequeue_prepares(*key) self.dequeue_commits(*key) self.stats.inc(TPCStat.PrePrepareRcvd) self.tryPrepare(pp)
Add the specified PRE-PREPARE to this replica's list of received PRE-PREPAREs and try sending PREPARE :param pp: the PRE-PREPARE to add to the list
def rnormal(mu, tau, size=None): return np.random.normal(mu, 1. / np.sqrt(tau), size)
Random normal variates.
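Note that ``tau`` is a precision, so the standard deviation handed to NumPy is 1/sqrt(tau). A quick check of that conversion:

.. code-block:: python

    import numpy as np

    np.random.seed(0)
    tau = 4.0                        # precision
    sigma = 1.0 / np.sqrt(tau)       # -> 0.5
    samples = np.random.normal(0.0, sigma, size=100000)
    print(round(samples.std(), 2))   # close to 0.5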
def xyz(self): if not self.children: pos = np.expand_dims(self._pos, axis=0) else: arr = np.fromiter(itertools.chain.from_iterable( particle.pos for particle in self.particles()), dtype=float) pos = arr.reshape((-1, 3)) return pos
Return all particle coordinates in this compound. Returns ------- pos : np.ndarray, shape=(n, 3), dtype=float Array with the positions of all particles.
def ping(host, timeout=False, return_boolean=False): if timeout: timeout = int(timeout) * 1000 // 4 cmd = ['ping', '-n', '4', '-w', six.text_type(timeout), salt.utils.network.sanitize_host(host)] else: cmd = ['ping', '-n', '4', salt.utils.network.sanitize_host(host)] if return_boolean: ret = __salt__['cmd.run_all'](cmd, python_shell=False) if ret['retcode'] != 0: return False else: return True else: return __salt__['cmd.run'](cmd, python_shell=False)
Performs a ping to a host CLI Example: .. code-block:: bash salt '*' network.ping archlinux.org .. versionadded:: 2016.11.0 Return a True or False instead of ping output. .. code-block:: bash salt '*' network.ping archlinux.org return_boolean=True Set the time to wait for a response in seconds. .. code-block:: bash salt '*' network.ping archlinux.org timeout=3
def home_abbreviation(self): abbr = re.sub(r'.*/teams/', '', str(self._home_name)) abbr = re.sub(r'/.*', '', abbr) return abbr
Returns a ``string`` of the home team's abbreviation, such as 'KAN'.
def members(self): with self._mutex: if not self._members: self._members = {} for o in self.organisations: self._members[o.org_id] = o.obj.get_members() return self._members
Member components if this component is composite.
def get_term_freq(self, go_id): num_ns = float(self.get_total_count(self.go2obj[go_id].namespace)) return float(self.get_count(go_id))/num_ns if num_ns != 0 else 0
Returns the frequency at which a particular GO term has been observed in the annotations.
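The frequency is simply the term's annotation count divided by the total count for its namespace, guarded against a zero denominator. A standalone sketch with hypothetical counts (these dicts stand in for the annotation store):

.. code-block:: python

    counts = {"GO:0008150": 120}                 # hypothetical per-term counts
    namespace_totals = {"biological_process": 4800}

    def term_freq(go_id, namespace):
        total = float(namespace_totals.get(namespace, 0))
        return counts.get(go_id, 0) / total if total else 0

    print(term_freq("GO:0008150", "biological_process"))  # -> 0.025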
def show_profiles(self): for i, (id, profiler, showed) in enumerate(self.profilers): if not showed and profiler: profiler.show(id) self.profilers[i][2] = True
Print the profile stats to stdout
def setup_files(class_dir, seed): random.seed(seed) files = list_files(class_dir) files.sort() random.shuffle(files) return files
Returns shuffled files
def get_html(self): if self.__htmltree is not None: return self.__htmltree else: self.__make_tree() return self.__htmltree
Generates (if need be) and returns a simpler HTML document with text.
async def start_component(workload: CoroutineFunction[T], *args: Any, **kwargs: Any) -> Component[T]: commands_a, commands_b = pipe() events_a, events_b = pipe() task = asyncio.create_task(workload(*args, commands=commands_b, events=events_b, **kwargs)) component = Component[T](commands_a, events_a, task) await component.wait_for_start() return component
\ Starts the passed `workload` with additional `commands` and `events` pipes. The workload will be executed as a task. A simple example. Note that here, the component is exclusively reacting to commands, and the owner waits for acknowledgements to its commands, making the order of outputs predictable. >>> @component_workload ... async def component(msg, *, commands, events): ... # do any startup tasks here ... print("> component starting up...") ... await events.send(Component.EVENT_START) ... ... count = 0 ... while True: ... command = await commands.recv() ... if command == Component.COMMAND_STOP: ... # honor stop commands ... break ... elif command == 'ECHO': ... print(f"> {msg}") ... count += 1 ... # acknowledge the command was serviced completely ... await commands.send(None) ... else: ... # unknown command; terminate ... # by closing the commands pipe, ... # the caller (if waiting for a reply) will receive an EOFError ... await commands.send(eof=True) ... raise ValueError ... ... # do any cleanup tasks here, probably in a finally block ... print("> component cleaning up...") ... return count ... >>> async def example(): ... print("call start") ... comp = await start_component(component, "Hello World") ... print("done") ... ... print("send command") ... await comp.request('ECHO') ... print("done") ... ... print("call stop") ... count = await comp.stop() ... print("done") ... ... print(count) ... >>> asyncio.run(example()) call start > component starting up... done send command > Hello World done call stop > component cleaning up... done 1
def svg2str(display_object, dpi=300): from io import StringIO image_buf = StringIO() display_object.frame_axes.figure.savefig( image_buf, dpi=dpi, format='svg', facecolor='k', edgecolor='k') return image_buf.getvalue()
Serializes a nilearn display object as a string
def check_elasticsearch(record, *args, **kwargs): def can(self): search = request._methodview.search_class() search = search.get_record(str(record.id)) return search.count() == 1 return type('CheckES', (), {'can': can})()
Return permission that checks if the record exists in the ES index. :param record: A record object. :returns: An object instance with a ``can()`` method.
def add_configuration(self, configuration, collect_another_source, done, result, src): if "includes" in result: for include in result["includes"]: collect_another_source(include) configuration.update(result, source=src)
Used to add a file to the configuration, result here is the yaml.load of the src
def load_parent_implems(self, parent_implems): for trname, attr, implem in parent_implems.get_custom_implementations(): self.implementations[trname] = implem.copy() self.transitions_at[trname] = attr self.custom_implems.add(trname)
Import previously defined implementations. Args: parent_implems (ImplementationList): List of implementations defined in a parent class.
def removeAllEntitlements(self, appId): params = { "f" : "json", "appId" : appId } url = self._url + "/licenses/removeAllEntitlements" return self._post(url=url, param_dict=params, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
This operation removes all entitlements from the portal for ArcGIS Pro or additional products such as Navigator for ArcGIS and revokes all entitlements assigned to users for the specified product. The portal is no longer a licensing portal for that product. License assignments are retained on disk. Therefore, if you decide to configure this portal as a licensing portal for the product again in the future, all licensing assignments will be available in the website. Parameters: appId - The identifier for the application for which the entitlements are being removed.
def make_butterworth_bandpass_b_a(CenterFreq, bandwidth, SampleFreq, order=5, btype='band'): lowcut = CenterFreq-bandwidth/2 highcut = CenterFreq+bandwidth/2 b, a = make_butterworth_b_a(lowcut, highcut, SampleFreq, order, btype) return b, a
Generates the b and a coefficients for a butterworth bandpass IIR filter. Parameters ---------- CenterFreq : float central frequency of bandpass bandwidth : float width of the bandpass from centre to edge SampleFreq : float Sample frequency of filter order : int, optional order of IIR filter. Is 5 by default btype : string, optional type of filter to make e.g. (band, low, high) Returns ------- b : ndarray coefficients multiplying the current and past inputs (feedforward coefficients) a : ndarray coefficients multiplying the past outputs (feedback coefficients)
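If ``make_butterworth_b_a`` wraps SciPy (an assumption; its internals are not shown here), an equivalent bandpass design can be sketched directly with ``scipy.signal.butter``:

.. code-block:: python

    from scipy.signal import butter

    def bandpass_b_a(center_freq, bandwidth, sample_freq, order=5):
        lowcut = center_freq - bandwidth / 2
        highcut = center_freq + bandwidth / 2
        # Normalise the band edges to the Nyquist frequency for the digital design
        nyq = sample_freq / 2.0
        b, a = butter(order, [lowcut / nyq, highcut / nyq], btype='band')
        return b, a

    b, a = bandpass_b_a(1000.0, 200.0, 10000.0)
    print(len(b), len(a))  # a bandpass of order N yields 2*N + 1 coefficients each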
def delete(self, using=None): from organizations.exceptions import OwnershipRequired try: if self.organization.owner.organization_user.pk == self.pk: raise OwnershipRequired( _( "Cannot delete organization owner " "before organization or transferring ownership." ) ) except self._org_owner_model.DoesNotExist: pass super(AbstractBaseOrganizationUser, self).delete(using=using)
If the organization user is also the owner, this should not be deleted unless it's part of a cascade from the Organization. If there is no owner then the deletion should proceed.
def storage_class(self, value): if value not in self._STORAGE_CLASSES: raise ValueError("Invalid storage class: %s" % (value,)) self._patch_property("storageClass", value)
Set the storage class for the bucket. See https://cloud.google.com/storage/docs/storage-classes :type value: str :param value: one of "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD", or "DURABLE_REDUCED_AVAILABILITY"
def run(self, order=None): for event in self.runner.run(order=order): self.receive(event)
Run every event produced by self.runner (which must be present) through receive().
def normalise_key(self, key): key = key.replace('-', '_') if key.startswith("noy_"): key = key[4:] return key
Make sure key is a valid python attribute
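A tiny standalone version of the key normalisation, showing the dash replacement and the "noy_" prefix strip:

.. code-block:: python

    def normalise_key(key):
        # Dashes are not valid in attribute names; the "noy_" prefix is dropped.
        key = key.replace('-', '_')
        if key.startswith("noy_"):
            key = key[4:]
        return key

    print(normalise_key("noy-store-only"))  # -> store_only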
def endswith_strip(s, endswith='.txt', ignorecase=True): if ignorecase: if s.lower().endswith(endswith.lower()): return s[:-len(endswith)] else: if s.endswith(endswith): return s[:-len(endswith)] return s
Strip a suffix from the end of a string >>> endswith_strip('http://TotalGood.com', '.COM') 'http://TotalGood' >>> endswith_strip('http://TotalGood.com', endswith='.COM', ignorecase=False) 'http://TotalGood.com'
def _compile_files(self): for f in glob.glob(os.path.join(self.dir_path, '*.py')): if not os.path.isfile(os.path.join(self.dir_path, f + 'c')): compileall.compile_dir(self.dir_path, quiet=True) logging.debug('Compiled plugins as a new plugin has been added.') return elif os.path.getmtime(os.path.join(self.dir_path, f)) > os.path.getmtime( os.path.join(self.dir_path, f + 'c')): compileall.compile_dir(self.dir_path, quiet=True) logging.debug('Compiled plugins as a plugin has been changed.') return
Compiles python plugin files in order to be processed by the loader. It compiles the plugins if they have been updated or haven't yet been compiled.
def rename(name, new_name, **kwargs): flags = [] target = [] if __utils__['zfs.is_snapshot'](name): if kwargs.get('create_parent', False): log.warning('zfs.rename - create_parent=True cannot be used with snapshots.') if kwargs.get('force', False): log.warning('zfs.rename - force=True cannot be used with snapshots.') if kwargs.get('recursive', False): flags.append('-r') else: if kwargs.get('create_parent', False): flags.append('-p') if kwargs.get('force', False): flags.append('-f') if kwargs.get('recursive', False): log.warning('zfs.rename - recursive=True can only be used with snapshots.') target.append(name) target.append(new_name) res = __salt__['cmd.run_all']( __utils__['zfs.zfs_command']( command='rename', flags=flags, target=target, ), python_shell=False, ) return __utils__['zfs.parse_command_result'](res, 'renamed')
Rename or Relocate a ZFS File System. name : string name of dataset, volume, or snapshot new_name : string new name of dataset, volume, or snapshot force : boolean force unmount any filesystems that need to be unmounted in the process. create_parent : boolean creates all the nonexistent parent datasets. Datasets created in this manner are automatically mounted according to the mountpoint property inherited from their parent. recursive : boolean recursively rename the snapshots of all descendent datasets. snapshots are the only dataset that can be renamed recursively. .. versionadded:: 2015.5.0 CLI Example: .. code-block:: bash salt '*' zfs.rename myzpool/mydataset myzpool/renameddataset
def _mark_dirty(self, xblock, value): if self not in xblock._dirty_fields: xblock._dirty_fields[self] = copy.deepcopy(value)
Set this field to dirty on the xblock.
def get_all_vpcs(self, vpc_ids=None, filters=None): params = {} if vpc_ids: self.build_list_params(params, vpc_ids, 'VpcId') if filters: i = 1 for filter in filters: params[('Filter.%d.Name' % i)] = filter[0] params[('Filter.%d.Value.1' % i)] = filter[1] i += 1 return self.get_list('DescribeVpcs', params, [('item', VPC)])
Retrieve information about your VPCs. You can filter results to return information only about those VPCs that match your search parameters. Otherwise, all VPCs associated with your account are returned. :type vpc_ids: list :param vpc_ids: A list of strings with the desired VPC ID's :type filters: list of tuples :param filters: A list of tuples containing filters. Each tuple consists of a filter key and a filter value. Possible filter keys are: - *state*, the state of the VPC (pending or available) - *cidrBlock*, CIDR block of the VPC - *dhcpOptionsId*, the ID of a set of DHCP options :rtype: list :return: A list of :class:`boto.vpc.vpc.VPC`
def get_lattice_type(cryst): lattice_types = [ [3, "Triclinic"], [16, "Monoclinic"], [75, "Orthorombic"], [143, "Tetragonal"], [168, "Trigonal"], [195, "Hexagonal"], [231, "Cubic"] ] sg = spg.get_spacegroup(cryst) m = re.match(r'([A-Z].*\b)\s*\(([0-9]*)\)', sg) sg_name = m.group(1) sg_nr = int(m.group(2)) for n, l in enumerate(lattice_types): if sg_nr < l[0]: bravais = l[1] lattype = n+1 break return lattype, bravais, sg_name, sg_nr
Find the symmetry of the crystal using spglib symmetry finder. Derive name of the space group and its number extracted from the result. Based on the group number identify also the lattice type and the Bravais lattice of the crystal. The lattice type numbers are (the numbering starts from 1): Triclinic (1), Monoclinic (2), Orthorombic (3), Tetragonal (4), Trigonal (5), Hexagonal (6), Cubic (7) :param cryst: ASE Atoms object :returns: tuple (lattice type number (1-7), lattice name, space group name, space group number)
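The lattice type is found by locating the space-group number below the first upper bound in the table. A standalone sketch of just that lookup (spglib is not needed for this part):

.. code-block:: python

    LATTICE_TYPES = [
        (3, "Triclinic"), (16, "Monoclinic"), (75, "Orthorombic"),
        (143, "Tetragonal"), (168, "Trigonal"), (195, "Hexagonal"),
        (231, "Cubic"),
    ]

    def lattice_from_sg_number(sg_nr):
        # The first upper bound the space-group number falls below wins
        for n, (upper, name) in enumerate(LATTICE_TYPES, start=1):
            if sg_nr < upper:
                return n, name

    print(lattice_from_sg_number(225))  # Fm-3m -> (7, 'Cubic')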
def nx_all_nodes_between(graph, source, target, data=False): import utool as ut if source is None: sources = list(ut.nx_source_nodes(graph)) assert len(sources) == 1, ( 'specify source if there is not only one') source = sources[0] if target is None: sinks = list(ut.nx_sink_nodes(graph)) assert len(sinks) == 1, ( 'specify sink if there is not only one') target = sinks[0] all_simple_paths = list(nx.all_simple_paths(graph, source, target)) nodes = sorted(set.union(*map(set, all_simple_paths))) return nodes
Find all nodes on paths between source and target.
def UTCFromGps(gpsWeek, SOW, leapSecs=14): secFract = SOW % 1 epochTuple = gpsEpoch + (-1, -1, 0) t0 = time.mktime(epochTuple) - time.timezone tdiff = (gpsWeek * secsInWeek) + SOW - leapSecs t = t0 + tdiff (year, month, day, hh, mm, ss, dayOfWeek, julianDay, daylightsaving) = time.gmtime(t) return (year, month, day, hh, mm, ss + secFract)
Converts GPS week and seconds to UTC (see comments of the inverse function). SOW = seconds of week; gpsWeek is the full number (not modulo 1024).
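The same conversion can be sketched with ``datetime`` alone: start from the GPS epoch (1980-01-06), add whole weeks plus seconds-of-week, and subtract the leap-second offset:

.. code-block:: python

    import datetime

    GPS_EPOCH = datetime.datetime(1980, 1, 6)

    def utc_from_gps(gps_week, sow, leap_secs=14):
        # GPS time runs ahead of UTC by the accumulated leap seconds
        return GPS_EPOCH + datetime.timedelta(weeks=gps_week, seconds=sow - leap_secs)

    print(utc_from_gps(1200, 302400.0))  # mid-week of GPS week 1200, minus 14 leap seconds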
def on_widget_created(self, ref): d = self.declaration self.widget = Snackbar(__id__=ref) self.init_widget()
Snackbar.make is asynchronous, so the widget has to be initialized later.
def to_array(self): array = super(Animation, self).to_array() array['file_id'] = u(self.file_id) array['width'] = int(self.width) array['height'] = int(self.height) array['duration'] = int(self.duration) if self.thumb is not None: array['thumb'] = self.thumb.to_array() if self.file_name is not None: array['file_name'] = u(self.file_name) if self.mime_type is not None: array['mime_type'] = u(self.mime_type) if self.file_size is not None: array['file_size'] = int(self.file_size) return array
Serializes this Animation to a dictionary. :return: dictionary representation of this object. :rtype: dict
def _execCommand(Argv, collect_missing): if not Argv: raise HandledException('Please specify a command!') RouteParts = Argv[0].split('/') Args, KwArgs = getDigestableArgs(Argv[1:]) ResolvedMember = getDescendant(BaseGroup, RouteParts[:]) if isinstance(ResolvedMember, Group): raise HandledException('Please specify a task.', Member=ResolvedMember) if not isinstance(ResolvedMember, Task): raise HandledException('No such task.', Member=BaseGroup) return ResolvedMember.__collect_n_call__(*Args, **KwArgs) if collect_missing else ResolvedMember(*Args, **KwArgs)
r"""Worker of execCommand.
def delete_api_method_response(restApiId, resourcePath, httpMethod, statusCode, region=None, key=None, keyid=None, profile=None): try: resource = describe_api_resource(restApiId, resourcePath, region=region, key=key, keyid=keyid, profile=profile).get('resource') if resource: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_method_response(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod, statusCode=str(statusCode)) return {'deleted': True} return {'deleted': False, 'error': 'no such resource'} except ClientError as e: return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
Delete API method response for a resource in the given API CLI Example: .. code-block:: bash salt myminion boto_apigateway.delete_api_method_response restApiId resourcePath httpMethod statusCode
def apply_effect(layer, image): for effect in layer.effects: if effect.__class__.__name__ == 'PatternOverlay': draw_pattern_fill(image, layer._psd, effect.value) for effect in layer.effects: if effect.__class__.__name__ == 'GradientOverlay': draw_gradient_fill(image, effect.value) for effect in layer.effects: if effect.__class__.__name__ == 'ColorOverlay': draw_solid_color_fill(image, effect.value)
Apply effect to the image. ..note: Correct effect order is the following. All the effects are first applied to the original image then blended together. * dropshadow * outerglow * (original) * patternoverlay * gradientoverlay * coloroverlay * innershadow * innerglow * bevelemboss * satin * stroke
def is_model_factory(node): try: parent_classes = node.expr.inferred() except: return False parents = ('factory.declarations.LazyFunction', 'factory.declarations.SubFactory', 'factory.django.DjangoModelFactory') for parent_class in parent_classes: try: if parent_class.qname() in parents: return True if node_is_subclass(parent_class, *parents): return True except AttributeError: continue return False
Checks whether the node derives from DjangoModelFactory or SubFactory.
def load_styles(path_or_doc): if isinstance(path_or_doc, string_types): doc = load(path_or_doc) else: if isinstance(path_or_doc, ODFDocument): doc = path_or_doc._doc else: doc = path_or_doc assert isinstance(doc, OpenDocument), doc styles = {_style_name(style): style for style in doc.styles.childNodes} return styles
Return a dictionary of all styles contained in an ODF document.
def get_major_minor(ilo_ver_str): if not ilo_ver_str: return None try: pattern = re.search(ILO_VER_STR_PATTERN, ilo_ver_str) if pattern: matched = pattern.group(0) if matched: return matched return None except Exception: return None
Extract the major and minor number from the passed string :param ilo_ver_str: the string that contains the version information :returns: String of the form "<major>.<minor>" or None
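``ILO_VER_STR_PATTERN`` is defined elsewhere; a standalone sketch with an explicit "<major>.<minor>" pattern (the regex here is an assumption, not the module's own):

.. code-block:: python

    import re

    def get_major_minor(ilo_ver_str):
        if not ilo_ver_str:
            return None
        match = re.search(r'\d+\.\d+', ilo_ver_str)  # hypothetical version pattern
        return match.group(0) if match else None

    print(get_major_minor("iLO 4 v2.54"))  # -> 2.54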
def runtime_values_changed(self, model, prop_name, info): if ("_input_runtime_value" in info.method_name or info.method_name in ['use_runtime_value_input_data_ports', 'input_data_port_runtime_values']) and \ self.model is model: self._data_ports_changed(model)
Handle cases for the library runtime values
def StatsToCSV(campaign, model='nPLD'): statsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (campaign, model)) csvfile = os.path.join(os.path.dirname(EVEREST_SRC), 'docs', 'c%02d.csv' % campaign) epic, kp, cdpp6r, cdpp6, _, _, _, _, saturated = \ np.loadtxt(statsfile, unpack=True, skiprows=2) with open(csvfile, 'w') as f: print('c%02d' % campaign, file=f) for i in range(len(epic)): print('%09d,%.3f,%.3f,%.3f,%d' % (epic[i], kp[i], cdpp6r[i], cdpp6[i], int(saturated[i])), file=f)
Generate the CSV file used in the search database for the documentation.
def localize_shapefile(shp_href, dirs): mapnik_requires_absolute_paths = (MAPNIK_VERSION < 601) shp_href = urljoin(dirs.source.rstrip('/')+'/', shp_href) scheme, host, path, p, q, f = urlparse(shp_href) if scheme in ('http','https'): msg('%s | %s' % (shp_href, dirs.cache)) scheme, path = '', locally_cache_remote_file(shp_href, dirs.cache) else: host = None to_posix(systempath.realpath(path)) if scheme not in ('file', ''): raise Exception("Shapefile needs to be local, not %s" % shp_href) if mapnik_requires_absolute_paths: path = posixpath.realpath(path) original = path path = dirs.output_path(path) if path.endswith('.zip'): path = posixpath.join(dirs.output, path) path = unzip_shapefile_into(path, dirs.cache, host) return dirs.output_path(path)
Given a shapefile href and a set of directories, modify the shapefile name so it's correct with respect to the output and cache directories.
def reference(self): total_enabled = ", ".join(self.selected) if len(total_enabled) < 1: total_enabled = ("{0}Are you crazy? This is a package " "manager for packages :p{1}".format( self.meta.color["RED"], self.meta.color["ENDC"])) self.msg.template(78) print("| Enabled repositories:") self.msg.template(78) print("| {0}".format(total_enabled)) self.msg.template(78) print("{0}Total {1}/{2} repositories enabled.{3}\n".format( self.meta.color["GREY"], len(self.selected), len(self.enabled + self.disabled), self.meta.color["ENDC"]))
Print a reference of the enabled repositories.
def avglosses_data_transfer(token, dstore): oq = dstore['oqparam'] N = len(dstore['assetcol']) R = dstore['csm_info'].get_num_rlzs() L = len(dstore.get_attr('risk_model', 'loss_types')) ct = oq.concurrent_tasks size_bytes = N * R * L * 8 * ct return ( '%d asset(s) x %d realization(s) x %d loss type(s) losses x ' '8 bytes x %d tasks = %s' % (N, R, L, ct, humansize(size_bytes)))
Determine the amount of average losses transferred from the workers to the controller node in a risk calculation.
def fromStructTime(klass, structTime, tzinfo=None): dtime = datetime.datetime(tzinfo=tzinfo, *structTime[:6]) self = klass.fromDatetime(dtime) self.resolution = datetime.timedelta(seconds=1) return self
Return a new Time instance from a time.struct_time. If tzinfo is None, structTime is in UTC. Otherwise, tzinfo is a datetime.tzinfo instance corresponding to the timezone in which structTime is. Many of the functions in the standard time module return these things. This will also work with a plain 9-tuple, for parity with the time module. The last three elements, i.e. tm_wday, tm_yday, and tm_isdst, are ignored.
def is_tomodir(subdirectories): required = ( 'exe', 'config', 'rho', 'mod', 'inv' ) is_tomodir = True for subdir in required: if subdir not in subdirectories: is_tomodir = False return is_tomodir
Provided with the subdirectories of a given directory, check if this is a tomodir.
def group_comments_by_round(comments, ranking=0): comment_rounds = {} ordered_comment_round_names = [] for comment in comments: comment_round_name = ranking and comment[11] or comment[7] if comment_round_name not in comment_rounds: comment_rounds[comment_round_name] = [] ordered_comment_round_names.append(comment_round_name) comment_rounds[comment_round_name].append(comment) return [(comment_round_name, comment_rounds[comment_round_name]) for comment_round_name in ordered_comment_round_names]
Group comments by the round to which they belong
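The grouping preserves the first-seen order of the round names. A minimal sketch of the same idea over (round, text) pairs rather than the full comment tuples:

.. code-block:: python

    def group_by_round(comments):
        rounds, order = {}, []
        for round_name, text in comments:
            # Record each round the first time it appears to keep ordering stable
            if round_name not in rounds:
                rounds[round_name] = []
                order.append(round_name)
            rounds[round_name].append(text)
        return [(name, rounds[name]) for name in order]

    pairs = [("draft", "a"), ("final", "b"), ("draft", "c")]
    print(group_by_round(pairs))  # [('draft', ['a', 'c']), ('final', ['b'])]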
def get_cv_accuracy(res): ac_list = [(accuracy["train_acc_final"], accuracy["test_acc_final"] ) for accuracy, weights in res] ac = np.array(ac_list) perf = { "mean_train_acc": np.mean(ac[:, 0]), "std_train_acc": np.std(ac[:, 0]), "mean_test_acc": np.mean(ac[:, 1]), "std_test_acc": np.std(ac[:, 1]), } return perf
Extract the cross-validation accuracy from the results.
def from_buffer(buffer, mime=False): m = _get_magic_type(mime) return m.from_buffer(buffer)
Accepts a binary string and returns the detected filetype. Return value is the mimetype if mime=True, otherwise a human readable name. >>> magic.from_buffer(open("testdata/test.pdf").read(1024)) 'PDF document, version 1.2'
def info(self): if self.descriptions is None: choice_list = ['"{}"'.format(choice) for choice in self.choices] else: choice_list = [ '"{}" ({})'.format(choice, self.descriptions[choice]) for choice in self.choices ] if len(self.choices) == 2: return 'either {} or {}'.format(choice_list[0], choice_list[1]) return 'any of {}'.format(', '.join(choice_list))
Formatted string to display the available choices
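A standalone rendering of the choice formatting, with and without descriptions:

.. code-block:: python

    def info(choices, descriptions=None):
        if descriptions is None:
            items = ['"{}"'.format(c) for c in choices]
        else:
            items = ['"{}" ({})'.format(c, descriptions[c]) for c in choices]
        # Two choices read as "either/or"; more read as "any of"
        if len(choices) == 2:
            return 'either {} or {}'.format(items[0], items[1])
        return 'any of {}'.format(', '.join(items))

    print(info(["on", "off"]))    # either "on" or "off"
    print(info(["a", "b", "c"]))  # any of "a", "b", "c"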
def acknowledge_alarm(self, alarm, comment=None): url = '/processors/{}/{}/parameters{}/alarms/{}'.format( self._instance, self._processor, alarm.name, alarm.sequence_number) req = rest_pb2.EditAlarmRequest() req.state = 'acknowledged' if comment is not None: req.comment = comment self._client.put_proto(url, data=req.SerializeToString())
Acknowledges a specific alarm associated with a parameter. :param alarm: Alarm instance :type alarm: :class:`.Alarm` :param str comment: Optional comment to associate with the state change.
def set_ddns_config(self, isenable, hostname, ddnsserver, user, password, callback=None): params = {'isEnable': isenable, 'hostName': hostname, 'ddnsServer': ddnsserver, 'user': user, 'password': password, } return self.execute_command('setDDNSConfig', params, callback=callback)
Set DDNS config.
def save(self): with open(self._user_config_file, 'w', encoding='utf-8') as f: self.write(f)
Write data to user config file.
def mark_offer_as_win(self, offer_id): return self._create_put_request( resource=OFFERS, billomat_id=offer_id, command=WIN, )
Mark an offer as won :param offer_id: the offer id :return: Response
def stop(self, timeout=15): pp = self.pid if pp: try: kill_process_nicely(pp, timeout=timeout) except psutil.NoSuchProcess: pass
Stop the subprocess. Keyword Arguments **timeout** Time in seconds to wait for a process and its children to exit.
def output(self, args): print("SensuPlugin: {}".format(' '.join(str(a) for a in args)))
Print the output message.
def create_repository(self): repo = repository.Repository( self.repository_config['repository'], self.username, self.password, self.disable_progress_bar ) repo.set_certificate_authority(self.cacert) repo.set_client_certificate(self.client_cert) return repo
Create a new repository for uploading.
def _fetch_xml(self, url): with contextlib.closing(urlopen(url)) as f: return xml.etree.ElementTree.parse(f).getroot()
Fetch a url and parse the document's XML.
def _build_function(self, function_name, codeuri, runtime): code_dir = str(pathlib.Path(self._base_dir, codeuri).resolve()) config = get_workflow_config(runtime, code_dir, self._base_dir) artifacts_dir = str(pathlib.Path(self._build_dir, function_name)) with osutils.mkdir_temp() as scratch_dir: manifest_path = self._manifest_path_override or os.path.join(code_dir, config.manifest_name) build_method = self._build_function_in_process if self._container_manager: build_method = self._build_function_on_container return build_method(config, code_dir, artifacts_dir, scratch_dir, manifest_path, runtime)
Given the function information, this method will build the Lambda function. Depending on the configuration it will either build the function in process or by spinning up a Docker container. Parameters ---------- function_name : str Name or LogicalId of the function codeuri : str Path to where the code lives runtime : str AWS Lambda function runtime Returns ------- str Path to the location where built artifacts are available
def uses_na_format(station: str) -> bool: if station[0] in NA_REGIONS: return True if station[0] in IN_REGIONS: return False if station[:2] in M_NA_REGIONS: return True if station[:2] in M_IN_REGIONS: return False raise BadStation("Station doesn't start with a recognized character set")
Returns True if the station uses the North American format, False if the International format
def schedule(self): assert self.collection_is_completed if self.collection is not None: for node in self.nodes: self.check_schedule(node) return if not self._check_nodes_have_same_collection(): self.log("**Different tests collected, aborting run**") return self.collection = list(self.node2collection.values())[0] self.pending[:] = range(len(self.collection)) if not self.collection: return initial_batch = max(len(self.pending) // 4, 2 * len(self.nodes)) nodes = cycle(self.nodes) for i in range(initial_batch): self._send_tests(next(nodes), 1) if not self.pending: for node in self.nodes: node.shutdown()
Initiate distribution of the test collection Initiate scheduling of the items across the nodes. If this gets called again later it behaves the same as calling ``.check_schedule()`` on all nodes so that newly added nodes will start to be used. This is called by the ``DSession.worker_collectionfinish`` hook if ``.collection_is_completed`` is True.
def ekacei(handle, segno, recno, column, nvals, ivals, isnull): handle = ctypes.c_int(handle) segno = ctypes.c_int(segno) recno = ctypes.c_int(recno) column = stypes.stringToCharP(column) nvals = ctypes.c_int(nvals) ivals = stypes.toIntVector(ivals) isnull = ctypes.c_int(isnull) libspice.ekacei_c(handle, segno, recno, column, nvals, ivals, isnull)
Add data to an integer column in a specified EK record. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekacei_c.html :param handle: EK file handle. :type handle: int :param segno: Index of segment containing record. :type segno: int :param recno: Record to which data is to be added. :type recno: int :param column: Column name. :type column: str :param nvals: Number of values to add to column. :type nvals: int :param ivals: Integer values to add to column. :type ivals: Array of ints :param isnull: Flag indicating whether column entry is null. :type isnull: bool
def Mux(fs, sel, simplify=True): if isinstance(sel, Expression): sel = [sel] if len(sel) < clog2(len(fs)): fstr = "expected at least {} select bits, got {}" raise ValueError(fstr.format(clog2(len(fs)), len(sel))) it = boolfunc.iter_terms(sel) y = exprnode.or_(*[exprnode.and_(f.node, *[lit.node for lit in next(it)]) for f in fs]) if simplify: y = y.simplify() return _expr(y)
Return an expression that multiplexes a sequence of input functions over a sequence of select functions.
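For the two-input case with one select bit, the multiplexer reduces to ``(~s & f0) | (s & f1)``. A quick plain-Python truth-table check of that identity (not using the expression library itself):

.. code-block:: python

    def mux2(f0, f1, s):
        # Select f0 when s is False, f1 when s is True
        return (not s and f0) or (s and f1)

    for f0 in (False, True):
        for f1 in (False, True):
            for s in (False, True):
                expected = f1 if s else f0
                assert mux2(f0, f1, s) == expected
    print("2:1 mux identity holds on all 8 input combinations")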
def get_object_url(self): return self.bundle.get_view_url('edit', self.request.user, {}, self.kwargs)
Returns the URL to link to the object. get_view_url will be called on the current bundle using 'edit' as the view name.
def remove(cls, target, exclude=None, ctx=None, select=lambda *p: True): exclude = () if exclude is None else exclude try: local_annotations = get_local_property( target, Annotation.__ANNOTATIONS_KEY__ ) except TypeError: raise TypeError('target {0} must be hashable'.format(target)) if local_annotations is not None: annotations_to_remove = [ annotation for annotation in local_annotations if ( isinstance(annotation, cls) and not isinstance(annotation, exclude) and select(target, ctx, annotation) ) ] for annotation_to_remove in annotations_to_remove: annotation_to_remove.remove_from(target)
Remove from target annotations which inherit from cls. :param target: target from where remove annotations which inherits from cls. :param tuple/type exclude: annotation types to exclude from selection. :param ctx: target ctx. :param select: annotation selection function which takes in parameters a target, a ctx and an annotation and return True if the annotation has to be removed.
def softmax(input_, labels=None, name=PROVIDED, loss_weight=None, per_example_weights=None): if labels is not None: full = input_.as_layer() return SoftmaxResult(input_.softmax_activation(), full.cross_entropy( labels, name=name, loss_weight=loss_weight, per_example_weights=per_example_weights)) else: return SoftmaxResult(input_.softmax_activation(), None)
Applies softmax and if labels is not None, then it also adds a loss. Args: input_: A rank 2 Tensor or a Pretty Tensor holding the logits. labels: The target labels to learn as a float tensor. Use None to not include a training loss. name: The optional name. loss_weight: A scalar multiplier for the loss. per_example_weights: A Tensor with a weight per example. Returns: A tuple of the a handle to softmax and a handle to the loss tensor. Raises: ValueError: If the datatype is wrong.
def listAttachments(self, oid): url = self._url + "/%s/attachments" % oid params = { "f":"json" } return self._get(url, params, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url)
List attachments for a given OBJECT ID.
def distance_correlation_af_inv_sqr(x, y): x = _af_inv_scaled(x) y = _af_inv_scaled(y) correlation = distance_correlation_sqr(x, y) return 0 if np.isnan(correlation) else correlation
Square of the affinely invariant distance correlation. Computes the estimator for the square of the affinely invariant distance correlation between two random vectors. .. warning:: The return value of this function is undefined when the covariance matrix of :math:`x` or :math:`y` is singular. Parameters ---------- x: array_like First random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. y: array_like Second random vector. The columns correspond with the individual random variables while the rows are individual instances of the random vector. Returns ------- numpy scalar Value of the estimator of the squared affinely invariant distance correlation. See Also -------- distance_correlation u_distance_correlation Examples -------- >>> import numpy as np >>> import dcor >>> a = np.array([[1, 3, 2, 5], ... [5, 7, 6, 8], ... [9, 10, 11, 12], ... [13, 15, 15, 16]]) >>> b = np.array([[1], [0], [0], [1]]) >>> dcor.distance_correlation_af_inv_sqr(a, a) 1.0 >>> dcor.distance_correlation_af_inv_sqr(a, b) # doctest: +ELLIPSIS 0.5773502... >>> dcor.distance_correlation_af_inv_sqr(b, b) 1.0
def repo_tools(self, branch): tools = [] m_helper = Tools() repo = self.parentApp.repo_value['repo'] version = self.parentApp.repo_value['versions'][branch] status = m_helper.repo_tools(repo, branch, version) if status[0]: r_tools = status[1] for tool in r_tools: tools.append(tool[0]) return tools
Set the appropriate repo dir and get the tools available in it.
def set_context(pid_file, context_info): assert type(context_info) == dict port_file = get_context_file_name(pid_file) with open(port_file, "wt") as f: f.write(json.dumps(context_info))
Set context of running notebook. :param context_info: dict of extra context parameters, see comm.py comments
def message(self, data): logging.info('Driver sends framework message {}'.format(data)) return self.driver.sendFrameworkMessage(data)
Sends a message to the framework scheduler. These messages are best effort; do not expect a framework message to be retransmitted in any reliable fashion.
def get_token_from_header(request): token = None if 'Authorization' in request.headers: split_header = request.headers.get('Authorization').split() if len(split_header) == 2 and split_header[0] == 'Bearer': token = split_header[1] else: token = request.access_token return token
Helper function to extract a token from the request header. :param request: OAuthlib request. :type request: oauthlib.common.Request :return: Return the token or None if the Authorization header is malformed.
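A standalone sketch of the header parsing on a plain string, without the oauthlib request object:

.. code-block:: python

    def token_from_authorization(header_value):
        # Expect exactly "Bearer <token>"; anything else is treated as malformed
        parts = header_value.split()
        if len(parts) == 2 and parts[0] == 'Bearer':
            return parts[1]
        return None

    print(token_from_authorization("Bearer abc123"))   # -> abc123
    print(token_from_authorization("Basic dXNlcg=="))  # -> None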
def templatesCollector(text, open, close): others = [] spans = [i for i in findBalanced(text, open, close)] spanscopy = copy(spans) for i in range(len(spans)): start, end = spans[i] o = text[start:end] ol = o.lower() if 'vaata|' in ol or 'wikitable' in ol: spanscopy.remove(spans[i]) continue others.append(o) text = dropSpans(spanscopy, text) return text, others
Leaves related-article and wikitable templates in place; collects the remaining templates and returns the stripped text together with them.
def hazard_notes(self): notes = [] hazard = definition(self.hazard.keywords.get('hazard')) if 'notes' in hazard: notes += hazard['notes'] if self.hazard.keywords['layer_mode'] == 'classified': if 'classified_notes' in hazard: notes += hazard['classified_notes'] if self.hazard.keywords['layer_mode'] == 'continuous': if 'continuous_notes' in hazard: notes += hazard['continuous_notes'] if self.hazard.keywords['hazard_category'] == 'single_event': if 'single_event_notes' in hazard: notes += hazard['single_event_notes'] if self.hazard.keywords['hazard_category'] == 'multiple_event': if 'multi_event_notes' in hazard: notes += hazard['multi_event_notes'] return notes
Get the hazard specific notes defined in definitions. This method will do a lookup in definitions and return the hazard definition specific notes dictionary. This is a helper function to make it easy to get hazard specific notes from the definitions metadata. .. versionadded:: 3.5 :returns: A list like e.g. safe.definitions.hazard_land_cover[ 'notes'] :rtype: list, None
def get_service_name(wrapped, instance, args, kwargs): if 'serviceAbbreviation' not in instance._service_model.metadata: return instance._service_model.metadata['endpointPrefix'] return instance._service_model.metadata['serviceAbbreviation']
Return the AWS service name the client is communicating with.
def _write_related_m2m_relations(self, obj, many_to_many_relationships): for fieldname, related_objs in many_to_many_relationships.items(): setattr(obj, fieldname, related_objs)
For the given `many_to_many_relationships` dict mapping field names to a list of object instances, apply the instance listing to the ``obj``'s named many-to-many relationship field.
def _rewrite_col(self, col): if isinstance(col, Col): new_name = rewrite_lookup_key(self.model, col.target.name) if col.target.name != new_name: new_field = self.model._meta.get_field(new_name) if col.target is col.source: col.source = new_field col.target = new_field elif hasattr(col, 'col'): self._rewrite_col(col.col) elif hasattr(col, 'lhs'): self._rewrite_col(col.lhs)
Django >= 1.7 column name rewriting
def phi_components_normalized(self): phi_components_normalized = {i: self.phi_components[i]/self.phi for i in self.phi_components} return phi_components_normalized
get the individual components of the total objective function normalized to the total PHI being 1.0 Returns ------- dict : dict dictionary of observation group, normalized contribution to total phi Raises ------ Assertion error if self.observation_data groups don't match self.res groups
def renumber(self): num = 0 for cell in self.cells: cell_split = cell.splitlines() if len(cell_split) >= 2: num += 1 cell_split[0] = str(num) yield '\n'.join(cell_split)
Re-number cells.
def _rest_patch(self, suburi, request_headers, request_body): return self._rest_op('PATCH', suburi, request_headers, request_body)
REST PATCH operation. HTTP response codes could be 500, 404, 202 etc.
def get_sdb_secret_version_paths(self, sdb_id): sdb_resp = get_with_retry(str.join('', [self.cerberus_url, '/v1/sdb-secret-version-paths/', sdb_id]), headers=self.HEADERS) throw_if_bad_response(sdb_resp) return sdb_resp.json()
Get SDB secret version paths. This function takes the sdb_id as its only argument.