Dataset columns:
    Unnamed: 0    int64     values 0 ... 389k
    code          string    lengths 26 ... 79.6k
    docstring     string    lengths 1 ... 46.9k
7,000
def tqxn(mt, x, n, t): return tpx(mt, x, t) * qx(mt, x + n)
n/qx : Probability of dying in n years, being alive at age x. Probability that x survives n years, and then dies in the subsequent t years.
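A minimal usage sketch (not from the dataset): with a toy mortality table and hypothetical tpx/qx helpers matching the signatures above, tqxn composes a survival probability with a one-year death probability:

# Hypothetical helpers for illustration; the real tpx/qx come from the
# surrounding actuarial library.
qx_table = [0.01, 0.012, 0.015, 0.02, 0.03]

def qx(mt, x):
    # probability of dying between age x and x+1
    return mt[x]

def tpx(mt, x, t):
    # probability that a life aged x survives t more years
    p = 1.0
    for i in range(t):
        p *= 1.0 - qx(mt, x + i)
    return p

def tqxn(mt, x, n, t):
    return tpx(mt, x, t) * qx(mt, x + n)

print(tqxn(qx_table, 0, 2, 2))  # survive 2 years from age 0, then die that year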
7,001
def create_routes_and_handler(transmute_func, context):
    @wraps(transmute_func.raw_func)
    def handler():
        exc, result = None, None
        try:
            args, kwargs = ParamExtractorFlask().extract_params(
                context, transmute_func, request.content_type
            )
            result = transmute_func(*args, **kwargs)
        except Exception as e:
            exc = e
            exc.__traceback__ = sys.exc_info()[2]
        response = transmute_func.process_result(
            context, result, exc, request.content_type
        )
        return Response(
            response["body"],
            status=response["code"],
            mimetype=response["content-type"],
            headers=response["headers"],
        )
    return (
        _convert_paths_to_flask(transmute_func.paths),
        handler,
    )
Return a handler that is the API generated from the transmute_func, and a list of routes it should be mounted to.
7,002
def geometry_checker(geometry):
    if geometry is None:
        return False, None
    if geometry.isGeosValid():
        return True, geometry
    else:
        new_geom = geometry.makeValid()
        if new_geom.isGeosValid():
            return False, new_geom
        else:
            return False, None
Perform a cleaning if the geometry is not valid. :param geometry: The geometry to check and clean. :type geometry: QgsGeometry :return: Tuple of bool and cleaned geometry. True if the geometry is already valid, False if the geometry was not valid. A cleaned geometry, or None if the geometry could not be repaired :rtype: (bool, QgsGeometry)
7,003
def read(self, device=None, offset=0, bs=None, count=1):
    volume = self.get_volume(device)
    block_size = bs or BLOCK_SIZE
    offset = int(offset) * block_size
    count = int(count)
    print("Offset: ", offset)
    total = 0
    # the dict key below was lost in extraction; 'path' is a guess
    with directio.open(volume['path'], buffered=block_size) as file:
        file.seek(offset)
        for i in range(0, count):
            total += os.write(sys.stdout.fileno(), file.read(block_size))
    os.write(sys.stdout.fileno(), "\nRead: %d Bytes\n" % total)
Using O_DIRECT, read from the block device specified to stdout (without any optional arguments this will read the first 4k from the device)
7,004
def listen(self):
    _LOGGER.info('Starting listener')  # log message lost in extraction; text is a guess
    self._mcastsocket = self._create_mcast_socket()
    self._listening = True
    thread = Thread(target=self._listen_to_msg, args=())
    self._threads.append(thread)
    thread.daemon = True
    thread.start()
Start listening.
7,005
def publish(self):
    self.lock.acquire()
    tw = cmdvel2Twist(self.vel)
    self.lock.release()
    if self.jdrc.getState() == "flying":
        self.pub.publish(tw)
Function to publish cmdvel.
7,006
def saveThumbnail(self, fileName, filePath):
    if self._thumbnail is None:
        self.__init()
    param_dict = {}
    if self._thumbnail is not None:
        imgUrl = self.root + "/info/" + self._thumbnail
        onlineFileName, file_ext = splitext(self._thumbnail)
        fileNameSafe = "".join(x for x in fileName if x.isalnum()) + file_ext
        result = self._get(url=imgUrl,
                           param_dict=param_dict,
                           securityHandler=self._securityHandler,
                           proxy_url=self._proxy_url,
                           proxy_port=self._proxy_port,
                           out_folder=filePath,
                           file_name=fileNameSafe)
        return result
    else:
        return None
URL to the thumbnail used for the item
7,007
def method(cache_name, key_prefix=None):
    def decorator(func):
        # the exempt method names checked here were lost in extraction
        if (func.__name__ in [...]
                and not config.CACHE_REPERTOIRES):
            return func

        @wraps(func)
        def wrapper(obj, *args, **kwargs):
            cache = getattr(obj, cache_name)
            key = cache.key(*args, _prefix=key_prefix, **kwargs)
            value = cache.get(key)
            if value is None:
                value = func(obj, *args, **kwargs)
                cache.set(key, value)
            return value
        return wrapper
    return decorator
Caching decorator for object-level method caches. Cache key generation is delegated to the cache. Args: cache_name (str): The name of the (already-instantiated) cache on the decorated object which should be used to store results of this method. *key_prefix: A constant to use as part of the cache key in addition to the method arguments.
7,008
def read_var_str(self, max_size=sys.maxsize):
    length = self.read_var_int(max_size)
    # the struct format suffix was lost in extraction; 's' (bytes) is the
    # usual choice for variable-length strings
    return self.unpack(str(length) + 's', length)
Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator. Args: max_size (int): (Optional) maximum number of bytes to read. Returns: bytes:
7,009
def pathname(self):
    slug = self.name
    slug = unidecode.unidecode(slug)
    slug = slug.replace("-", "")
    slug = re.sub(r"[^\w\.]+", "-", slug).strip("-")
    return os.path.join(*slug.split("."))
Sluggified path for filenames

Slugs to a filename using the following steps

* Decode unicode to approximate ascii
* Remove existing hyphens
* Substitute hyphens for non-word characters
* Break up the string as paths
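A quick walk-through of those steps on a sample name (assumes the third-party unidecode package):

import os
import re
import unidecode

name = "Café-Docs.User Guide"
slug = unidecode.unidecode(name)                    # 'Cafe-Docs.User Guide'
slug = slug.replace("-", "")                        # 'CafeDocs.User Guide'
slug = re.sub(r"[^\w\.]+", "-", slug).strip("-")    # 'CafeDocs.User-Guide'
print(os.path.join(*slug.split(".")))               # 'CafeDocs/User-Guide' on POSIX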
7,010
def _tokenize(self, source, name, filename=None, state=None):
    source = self.preprocess(source, name, filename)
    stream = self.lexer.tokenize(source, name, filename, state)
    for ext in self.iter_extensions():
        stream = ext.filter_stream(stream)
        if not isinstance(stream, TokenStream):
            stream = TokenStream(stream, name, filename)
    return stream
Called by the parser to do the preprocessing and filtering for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
7,011
def VShadowPathSpecGetStoreIndex(path_spec):
    # attribute and location literals below were lost in extraction;
    # 'store_index', 'location' and '/vss' match the dfVFS VSS convention
    # (and the int(location[4:], ...) slice implies a 4-character prefix)
    store_index = getattr(path_spec, 'store_index', None)
    if store_index is None:
        location = getattr(path_spec, 'location', None)
        if location is None or not location.startswith('/vss'):
            return None
        store_index = None
        try:
            store_index = int(location[4:], 10) - 1
        except (TypeError, ValueError):
            pass
    if store_index is None or store_index < 0:
        return None
    return store_index
Retrieves the store index from the path specification. Args: path_spec (PathSpec): path specification. Returns: int: store index or None if not available.
7,012
def videos(self, **kwargs):
    path = self._get_series_id_season_number_episode_number_path()
    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
Get the videos that have been added to a TV episode (teasers, clips, etc...). Args: language: (optional) ISO 639 code. Returns: A dict representation of the JSON returned from the API.
7,013
def make_items_for(brains_or_objects, endpoint=None, complete=False):
    include_children = req.get_children(False)

    def extract_data(brain_or_object):
        info = get_info(brain_or_object, endpoint=endpoint, complete=complete)
        if include_children and is_folderish(brain_or_object):
            info.update(get_children_info(brain_or_object, complete=complete))
        return info

    return map(extract_data, brains_or_objects)
Generate API compatible data items for the given list of brains/objects :param brains_or_objects: List of objects or brains :type brains_or_objects: list/Products.ZCatalog.Lazy.LazyMap :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :param complete: Flag to wake up the object and fetch all data :type complete: bool :returns: A list of extracted data items :rtype: list
7,014
def can_delete_objectives(self):
    # the URL fragment and response keys were lost in extraction
    url_path = construct_url(..., bank_id=self._catalog_idstr)
    return self._get_request(url_path)[...][...]
Tests if this user can delete Objectives. A return of true does not guarantee successful authorization. A return of false indicates that it is known deleting an Objective will result in a PermissionDenied. This is intended as a hint to an application that may opt not to offer delete operations to an unauthorized user. return: (boolean) - false if Objective deletion is not authorized, true otherwise compliance: mandatory - This method must be implemented.
7,015
def get_portal_url_base(self):
    # the regex patterns were lost in extraction; they rewrite the api
    # hostname into the portal hostname (see the docstring example)
    api_url = urlparse(self.url).hostname
    portal_url = re.sub(..., ..., api_url)
    portal_url = re.sub(..., ..., portal_url)
    return portal_url
Determine root url of the data service from the url specified. :return: str root url of the data service (eg: https://dataservice.duke.edu)
7,016
def running_conversions(self, folder_id=None):
    # the params key and endpoint path were lost in extraction
    params = {...: folder_id} if folder_id else {}
    return self._get(..., params=params)
Shows running file converts by folder

Note:
    If folder_id is not provided, ``Home`` folder will be used.

Args:
    folder_id (:obj:`str`, optional): id of the folder to list conversions
        of files exist in it.

Returns:
    list: list of dictionaries, each dictionary represents a file
    conversion info. ::

        [
            {
                "name": "Geysir.AVI",
                "id": "3565411",
                "status": "pending",
                "last_update": "2015-08-23 19:41:40",
                "progress": 0.32,
                "retries": "0",
                "link": "https://openload.co/f/f02JFG293J8/Geysir.AVI",
                "linkextid": "f02JFG293J8"
            },
            ....
        ]
7,017
def _make_probs(self, *sequences):
    sequences = self._get_counters(*sequences)
    counts = self._sum_counters(*sequences)
    if self.terminator is not None:
        counts[self.terminator] = 1
    total_letters = sum(counts.values())
    prob_pairs = {}
    cumulative_count = 0
    counts = sorted(counts.items(), key=lambda x: (x[1], x[0]), reverse=True)
    for char, current_count in counts:
        prob_pairs[char] = (
            Fraction(cumulative_count, total_letters),
            Fraction(current_count, total_letters),
        )
        cumulative_count += current_count
    assert cumulative_count == total_letters
    return prob_pairs
https://github.com/gw-c/arith/blob/master/arith.py
7,018
def correctX(args):
    p = OptionParser(correctX.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    folder, tag = args
    tag = tag.split(",")
    for p, pf in iter_project(folder):
        correct_pairs(p, pf, tag)
%prog correctX folder tag Run ALLPATHS correction on a folder of paired reads and apply tag.
7,019
def render(self, name=None, template=None, context={}):
    if isinstance(template, Template):
        _template = template
    else:
        _template = Template.objects.get(name=name)
    response = self.env.from_string(_template.content).render(context)
    return response
Render Template meta from jinja2 templates.
7,020
def tricu(P, k=0):
    tri = numpy.sum(numpy.mgrid[[slice(0, _, 1) for _ in P.shape]], 0)
    tri = tri < len(tri) + k
    if isinstance(P, Poly):
        A = P.A.copy()
        B = {}
        for key in P.keys:
            B[key] = A[key] * tri
        return Poly(B, shape=P.shape, dim=P.dim, dtype=P.dtype)
    out = P * tri
    return out
Cross-diagonal upper triangle.
7,021
def add_access_list(self, accesslist, rank=None):
    # the 'type' literal was lost in extraction
    self.conditions.append(
        dict(access_list_ref=accesslist.href, type=..., rank=rank))
Add an access list to the match condition. Valid access list types are IPAccessList (v4 and v6), IPPrefixList (v4 and v6), AS Path, CommunityAccessList, ExtendedCommunityAccessList.
7,022
def syscall(self, func):
    # the attribute name checked here was lost in extraction;
    # 'syscallfunc' is a guess consistent with the assignment below
    if getattr(self, 'syscallfunc', None) is not None:
        return None
    self.syscallfunc = func
    self.syscallmatcher = SyscallReturnEvent.createMatcher()
    return self.syscallmatcher
Call the func in core context (main loop). func should like:: def syscall_sample(scheduler, processor): something... where processor is a function which accept an event. When calling processor, scheduler directly process this event without sending it to queue. An event matcher is returned to the caller, and the caller should wait for the event immediately to get the return value from the system call. The SyscallReturnEvent will have 'retvalue' as the return value, or 'exception' as the exception thrown: (type, value, traceback) :param func: syscall function :returns: an event matcher to wait for the SyscallReturnEvent. If None is returned, a syscall is already scheduled; return to core context at first.
7,023
def is_tuple_type(tp):
    if NEW_TYPING:
        return (tp is Tuple or
                isinstance(tp, _GenericAlias) and tp.__origin__ is tuple or
                isinstance(tp, type) and issubclass(tp, Generic) and
                issubclass(tp, tuple))
    return type(tp) is TupleMeta
Test if the type is a generic tuple type, including subclasses excluding non-generic classes. Examples:: is_tuple_type(int) == False is_tuple_type(tuple) == False is_tuple_type(Tuple) == True is_tuple_type(Tuple[str, int]) == True class MyClass(Tuple[str, int]): ... is_tuple_type(MyClass) == True For more general tests use issubclass(..., tuple), for more precise test (excluding subclasses) use:: get_origin(tp) is tuple # Tuple prior to Python 3.7
7,024
def _get_value_from_match(self, key, match):
    value = match.groups(1)[0]
    clean_value = str(value).lstrip().rstrip()
    # the compared literals and log-message templates were lost in
    # extraction; 'true'/'false' fit the boolean conversion, and the log
    # texts are guesses matching the surviving argument order
    if clean_value == 'true':
        self._log.info('Value of %s is True', key)
        return True
    if clean_value == 'false':
        self._log.info('Value of %s is False', key)
        return False
    try:
        float_value = float(clean_value)
        self._log.info('Value of %s is %s', key, float_value)
        return float_value
    except ValueError:
        self._log.info('Value of %s is %s', key, clean_value)
        return clean_value
Gets the value of the property in the given MatchObject. Args: key (str): Key of the property looked-up. match (MatchObject): The matched property. Return: The discovered value, as a string or boolean.
7,025
def get_checkerboard_matrix(kernel_width):
    return np.vstack((
        np.hstack((
            -1 * np.ones((kernel_width, kernel_width)),
            np.ones((kernel_width, kernel_width))
        )),
        np.hstack((
            np.ones((kernel_width, kernel_width)),
            -1 * np.ones((kernel_width, kernel_width))
        ))
    ))
example matrix for width = 2

-1 -1  1  1
-1 -1  1  1
 1  1 -1 -1
 1  1 -1 -1

:param kernel_width:
:return:
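Usage sketch, assuming the function as defined above:

import numpy as np

kernel = get_checkerboard_matrix(2)
print(kernel.shape)   # (4, 4)
print(kernel[0])      # [-1. -1.  1.  1.]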
7,026
def stripped_db_url(url):
    parsed = urlparse(url)
    if parsed.password is None:
        return url
    return parsed._replace(
        netloc="{}:***@{}".format(parsed.username, parsed.hostname)
    ).geturl()
Return a version of the DB url with the password stripped out.
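Usage sketch; note that the rewrite also drops any port from the netloc:

from urllib.parse import urlparse  # assumed import for the function above

print(stripped_db_url("postgres://alice:s3cret@db.example.com/app"))
# postgres://alice:***@db.example.com/app
print(stripped_db_url("postgres://db.example.com/app"))  # no password: returned unchanged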
7,027
def format_config(sensor_graph):
    cmdfile = CommandFile("Config Variables", "1.0")
    for slot in sorted(sensor_graph.config_database, key=lambda x: x.encode()):
        for conf_var, conf_def in sorted(sensor_graph.config_database[slot].items()):
            conf_type, conf_val = conf_def
            # the type tag and prefix literals were lost in extraction;
            # 'binary' / 'hex:' fit the hexlify() conversion here
            if conf_type == 'binary':
                conf_val = 'hex:' + hexlify(conf_val)
            cmdfile.add("set_variable", slot, conf_var, conf_type, conf_val)
    return cmdfile.dump()
Extract the config variables from this sensor graph in ASCII format. Args: sensor_graph (SensorGraph): the sensor graph that we want to format Returns: str: The ascii output lines concatenated as a single string
7,028
def start(component, exact):
    from peltak.extra.gitflow import logic
    logic.release.start(component, exact)
Create a new release.

It will bump the current version number and create a release branch called
`release/<version>` with one new commit (the version bump).

**Example Config**::

    \b
    version_file: 'src/mypkg/__init__.py'

**Examples**::

    \b
    $ peltak release start patch    # Make a new patch release
    $ peltak release start minor    # Make a new minor release
    $ peltak release start major    # Make a new major release
    $ peltak release start          # same as start patch
7,029
def setShadowed(self, state):
    self._shadowed = state
    if state:
        self._colored = False
    for child in self.findChildren(XToolButton):
        child.setShadowed(state)
Sets whether or not this toolbar is shadowed. :param state | <bool>
7,030
def subnet_get(auth=None, **kwargs):
    cloud = get_operator_cloud(auth)
    kwargs = _clean_kwargs(**kwargs)
    return cloud.get_subnet(**kwargs)
Get a single subnet filters A Python dictionary of filter conditions to push down CLI Example: .. code-block:: bash salt '*' neutronng.subnet_get name=subnet1
7,031
def add_algorithm(self, parser):
    # the help template, its dict key and the metavar were lost in
    # extraction; '--algorithm' is restored from the docstring below
    help = ... % {...: ca_settings.CA_DIGEST_ALGORITHM.name}
    parser.add_argument(
        '--algorithm', metavar=...,
        default=ca_settings.CA_DIGEST_ALGORITHM,
        action=AlgorithmAction, help=help)
Add the --algorithm option.
7,032
def sphergal_to_rectgal(l, b, d, vr, pmll, pmbb, degree=False):
    XYZ = lbd_to_XYZ(l, b, d, degree=degree)
    vxvyvz = vrpmllpmbb_to_vxvyvz(vr, pmll, pmbb, l, b, d,
                                  XYZ=False, degree=degree)
    if sc.array(l).shape == ():
        return sc.array([XYZ[0], XYZ[1], XYZ[2],
                         vxvyvz[0], vxvyvz[1], vxvyvz[2]])
    else:
        out = sc.zeros((len(l), 6))
        out[:, 0:3] = XYZ
        out[:, 3:6] = vxvyvz
        return out
NAME:
   sphergal_to_rectgal
PURPOSE:
   transform phase-space coordinates in spherical Galactic coordinates to rectangular Galactic coordinates (can take vector inputs)
INPUT:
   l - Galactic longitude (rad)
   b - Galactic latitude (rad)
   d - distance (kpc)
   vr - line-of-sight velocity (km/s)
   pmll - proper motion in the Galactic longitude direction (mu_l*cos(b)) (mas/yr)
   pmbb - proper motion in the Galactic latitude (mas/yr)
   degree - (bool) if True, l and b are in degrees
OUTPUT:
   (X,Y,Z,vx,vy,vz) in (kpc,kpc,kpc,km/s,km/s,km/s)
HISTORY:
   2009-10-25 - Written - Bovy (NYU)
7,033
def key(self, *args, **kwargs):
    if not args and not kwargs:
        return self.class_path
    try:
        if args and not kwargs:
            return "%s:%s" % (self.class_path, self.hash(args))
        return "%s:%s:%s:%s" % (self.class_path,
                                self.hash(args),
                                self.hash([k for k in sorted(kwargs)]),
                                self.hash([kwargs[k] for k in sorted(kwargs)]))
    except TypeError:
        raise RuntimeError(
            "Unable to generate cache key due to unhashable "
            "args or kwargs - you need to implement your own "
            "key generation method to avoid this problem")
Return the cache key to use. If you're passing anything but primitive types to the ``get`` method, it's likely that you'll need to override this method.
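A sketch of the key shapes this scheme produces, with a hypothetical class_path (the names below are illustrative only, not from the snippet):

# job.key()          -> "myapp.jobs.Job"
# job.key(1, 2)      -> "myapp.jobs.Job:<hash of args>"
# job.key(1, page=2) -> "myapp.jobs.Job:<hash of args>:<hash of kwarg names>:<hash of kwarg values>"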
7,034
def execution():
    # the client target/function literals and the docs key were lost in
    # extraction; '*', 'sys.doc' and 'ret' fit the "collect sys.doc from
    # each minion" behavior described below
    client = salt.client.get_local_client(__opts__['conf_file'])
    docs = {}
    try:
        for ret in client.cmd_iter('*', 'sys.doc', timeout=__opts__['timeout']):
            for v in six.itervalues(ret):
                docs.update(v)
    except SaltClientError as exc:
        print(exc)
        return []
    i = itertools.chain.from_iterable([six.iteritems(docs['ret'])])
    ret = dict(list(i))
    return ret
Collect all the sys.doc output from each minion and return the aggregate CLI Example: .. code-block:: bash salt-run doc.execution
7,035
def flush(self):
    if self.redirect is not None:
        self.redirect.flush()
    super(TeeStringIO, self).flush()
Flush to this and the redirected stream
7,036
def data(self):
    if self._next_update and datetime.now() > self._next_update:
        self.update()
    return self._data
Get a cached post-processed result of a GitHub API call. Uses Trac cache to avoid constant querying of the remote API. If a previous API call did not succeed, automatically retries after a timeout.
7,037
def ternarize(x, thresh=0.05):
    shape = x.get_shape()
    thre_x = tf.stop_gradient(tf.reduce_max(tf.abs(x)) * thresh)
    # variable names and summary suffixes were lost in extraction;
    # 'Wp'/'Wn' and '-summary' follow the referenced ternarynet code
    w_p = tf.get_variable('Wp', initializer=1.0, dtype=tf.float32)
    w_n = tf.get_variable('Wn', initializer=1.0, dtype=tf.float32)
    tf.summary.scalar(w_p.op.name + '-summary', w_p)
    tf.summary.scalar(w_n.op.name + '-summary', w_n)
    mask = tf.ones(shape)
    mask_p = tf.where(x > thre_x, tf.ones(shape) * w_p, mask)
    mask_np = tf.where(x < -thre_x, tf.ones(shape) * w_n, mask_p)
    mask_z = tf.where((x < thre_x) & (x > -thre_x), tf.zeros(shape), mask)

    @tf.custom_gradient
    def _sign_mask(x):
        return tf.sign(x) * mask_z, lambda dy: dy

    w = _sign_mask(x)
    w = w * mask_np
    tf.summary.histogram(w.name, w)
    return w
Implemented Trained Ternary Quantization: https://arxiv.org/abs/1612.01064 Code modified from the authors' at: https://github.com/czhu95/ternarynet/blob/master/examples/Ternary-Net/ternary.py
7,038
def from_env(cls, prefix, kms_decrypt=False, aws_profile=None):
    if len(prefix) < 1:
        raise ValueError("prefix can't be empty!")  # message garbled in extraction
    if not prefix.endswith("_"):
        prefix = prefix + "_"
    data = dict(
        host=os.getenv(prefix + "HOST"),
        port=os.getenv(prefix + "PORT"),
        database=os.getenv(prefix + "DATABASE"),
        username=os.getenv(prefix + "USERNAME"),
        password=os.getenv(prefix + "PASSWORD"),
    )
    if kms_decrypt is True:
        import boto3
        from base64 import b64decode

        # the original branch order ignored aws_profile entirely; the
        # condition is inverted here so the profile is used when given
        if aws_profile is None:
            kms = boto3.client("kms")
        else:
            ses = boto3.Session(profile_name=aws_profile)
            kms = ses.client("kms")

        def decrypt(kms, text):
            return kms.decrypt(
                CiphertextBlob=b64decode(text.encode("utf-8"))
            )["Plaintext"].decode("utf-8")

        data = {
            key: value if value is None else decrypt(kms, str(value))
            for key, value in data.items()
        }
    return cls(**data)
Load database credential from env variable. - host: ENV.{PREFIX}_HOST - port: ENV.{PREFIX}_PORT - database: ENV.{PREFIX}_DATABASE - username: ENV.{PREFIX}_USERNAME - password: ENV.{PREFIX}_PASSWORD :param prefix: str :param kms_decrypt: bool :param aws_profile: str
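Usage sketch (the owning class is not shown in the snippet, so Credential below is a hypothetical name):

import os

os.environ["DB_HOST"] = "localhost"
os.environ["DB_PORT"] = "5432"
os.environ["DB_DATABASE"] = "app"
os.environ["DB_USERNAME"] = "alice"
os.environ["DB_PASSWORD"] = "s3cret"

# cred = Credential.from_env("DB")  # reads the five DB_* variables above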
7,039
def red_ext(request, message=None):
    # the body was garbled in extraction; this reconstruction follows the
    # docstring and the surviving reverse('external') fragment
    if message:
        messages.add_message(request, messages.ERROR, message)
    return HttpResponseRedirect(reverse('external'))
The external landing. Also a convenience function for redirecting users who don't have site access to the external page. Parameters: request - the request in the calling function message - a message from the caller function
7,040
def evaluate(data_source, batch_size, ctx=None):
    total_L = 0
    hidden = cache_cell.begin_state(
        func=mx.nd.zeros, batch_size=batch_size, ctx=context[0])
    next_word_history = None
    cache_history = None
    for i in range(0, len(data_source) - 1, args.bptt):
        if i > 0:
            # the progress format string was lost in extraction; a
            # plausible template is used here
            print('batch %d/%d, ppl %f'
                  % (i, len(data_source), math.exp(total_L / i)))
        data, target = get_batch(data_source, i)
        data = data.as_in_context(ctx)
        target = target.as_in_context(ctx)
        L = 0
        outs, next_word_history, cache_history, hidden = cache_cell(
            data, target, next_word_history, cache_history, hidden)
        for out in outs:
            L += (-mx.nd.log(out)).asscalar()
        total_L += L / data.shape[1]
        hidden = detach(hidden)
    return total_L / len(data_source)
Evaluate the model on the dataset with cache model. Parameters ---------- data_source : NDArray The dataset is evaluated on. batch_size : int The size of the mini-batch. ctx : mx.cpu() or mx.gpu() The context of the computation. Returns ------- loss: float The loss on the dataset
7,041
def node_hist_fig(node_color_distribution, title="Graph Node Distribution",
                  width=400, height=300, top=60, left=25, bottom=60, right=25,
                  bgcolor="rgb(240,240,240)", y_gridcolor="white"):
    text = [
        "{perc}%".format(**locals())
        for perc in [d["perc"] for d in node_color_distribution]
    ]
    pl_hist = go.Bar(
        y=[d["height"] for d in node_color_distribution],
        marker=dict(color=[d["color"] for d in node_color_distribution]),
        text=text,
        hoverinfo="y+text",
    )
    hist_layout = dict(
        title=title,
        width=width,
        height=height,
        font=dict(size=12),
        xaxis=dict(showline=True, zeroline=False, showgrid=False,
                   showticklabels=False),
        yaxis=dict(showline=False, gridcolor=y_gridcolor,
                   tickfont=dict(size=10)),
        bargap=0.01,
        margin=dict(l=left, r=right, b=bottom, t=top),
        hovermode="x",
        plot_bgcolor=bgcolor,
    )
    return go.FigureWidget(data=[pl_hist], layout=hist_layout)
Define the plotly plot representing the node histogram Parameters ---------- node_color_distribution: list of dicts describing the build_histogram width, height: integers - width and height of the histogram FigureWidget left, top, right, bottom: ints; number of pixels around the FigureWidget bgcolor: rgb of hex color code for the figure background color y_gridcolor: rgb of hex color code for the yaxis y_gridcolor Returns ------- FigureWidget object representing the histogram of the graph nodes
7,042
def __set_URL(self, url):
    if isinstance(url, str):
        url.encode("ascii")  # raises if the URL contains non-ASCII characters
        self.url = url
    elif sys.version_info < (3, 0):
        self.url = url.encode("ascii")
    else:
        self.url = url.decode("ascii")
URL is stored as a str internally and must not contain non-ASCII characters. Raised exception in case of detected non-ASCII URL characters may be either UnicodeEncodeError or UnicodeDecodeError, depending on the used Python version's str type and the exact value passed as URL input data.
7,043
def get_events(self, **kwargs):
    force = kwargs.pop('force', False)  # key lost in extraction; inferred from the variable
    response = api.request_sync_events(self.blink, self.network_id, force=force)
    try:
        return response['event']  # response key lost in extraction; 'event' is a guess
    except (TypeError, KeyError):
        _LOGGER.error("Could not extract events: %s", response, exc_info=True)
        return False
Retrieve events from server.
7,044
def _build_sql_query(self):
    # the log messages and the four SQL templates were lost in extraction;
    # only the branching on galaxyType survives
    self.log.info('starting the ``_build_sql_query`` method')
    ra1, ra2, dec1, dec2 = self.ra1, self.ra2, self.dec1, self.dec2
    if self.galaxyType == "all":
        self.sqlQuery = ... % locals()
    elif self.galaxyType == "specz":
        self.sqlQuery = ... % locals()
    elif self.galaxyType == "photoz":
        self.sqlQuery = ... % locals()
    elif self.galaxyType == False or not self.galaxyType:
        self.sqlQuery = ... % locals()
    self.sqlQuery = self.sqlQuery.strip()
    self.log.info('completed the ``_build_sql_query`` method')
    return None
*build sql query for the sdss square search*

**Key Arguments:**
    # -

**Return:**
    - None

.. todo::
7,045
def _update_targets(vesseldicts, environment_dict):
    # several dict keys and the newidlist/faillist initializations were
    # lost in extraction; the keys filled in below are guesses inferred
    # from usage, and the group name printed at the end is unknown
    nodelist = []
    for vesseldict in vesseldicts:
        nodeip_port = vesseldict['nodeip'] + ':' + str(vesseldict['nodeport'])
        if not nodeip_port in nodelist:
            nodelist.append(nodeip_port)
    seash_global_variables.targets['browsegood'] = []
    newidlist = []
    faillist = []
    print nodelist
    retdict = seash_helper.contact_targets(
        nodelist,
        seash_helper.browse_target,
        environment_dict['currentkeyname'],
    )
    for nodename in retdict:
        if retdict[nodename][0]:
            newidlist = newidlist + retdict[nodename][1]
        else:
            faillist.append(nodename)
    seash_helper.print_vessel_errors(retdict)
    if len(newidlist) == 0:
        print "Could not add any new targets."
    else:
        print "Added targets: " + ", ".join(newidlist)
    if len(seash_global_variables.targets['browsegood']) > 0:
        num_targets = str(len(seash_global_variables.targets['browsegood']))
        print "Added group with " + num_targets + " targets"
<Purpose>
    Connects to the nodes in the vesseldicts and adds them to the list of valid targets.

<Arguments>
    vesseldicts:
        A list of vesseldicts obtained through SeattleClearinghouseClient calls.

<Side Effects>
    All valid targets that the user can access on the specified nodes are added to the list of targets.

<Exceptions>
    None

<Returns>
    None
7,046
def list_networks(**kwargs):
    conn = __get_conn(**kwargs)
    try:
        return [net.name() for net in conn.listAllNetworks()]
    finally:
        conn.close()
List all virtual networks. :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.list_networks
7,047
def accept(self, origin, protocol):
    # the loop body was lost in extraction; per the docstring it should
    # find the factory for the requested protocol and build a route,
    # raising only when no factory matches
    for factory in self.store.powerupsFor(IBoxReceiverFactory):
        if factory.protocol == protocol:
            ...  # create and return the new route identifier
    raise ProtocolUnknown()
Create a new route attached to a L{IBoxReceiver} created by the L{IBoxReceiverFactory} with the indicated protocol. @type origin: C{unicode} @param origin: The identifier of a route on the peer which will be associated with this connection. Boxes sent back by the protocol which is created in this call will be sent back to this route. @type protocol: C{unicode} @param protocol: The name of the protocol to which to establish a connection. @raise ProtocolUnknown: If no factory can be found for the named protocol. @return: A newly created C{unicode} route identifier for this connection (as the value of a C{dict} with a C{'route'} key).
7,048
def paintEvent(self, event):
    pixmap = self.currentPixmap()
    rect = self.currentPixmapRect()
    with XPainter(self) as painter:
        painter.drawPixmap(rect.x(), rect.y(), pixmap)
Draws the pixmap for this widget. :param event | <QPaintEvent>
7,049
def cp(hdfs_src, hdfs_dst):
    cmd = "hadoop fs -cp %s %s" % (hdfs_src, hdfs_dst)
    rcode, stdout, stderr = _checked_hadoop_fs_command(cmd)
Copy a file :param hdfs_src: Source (str) :param hdfs_dst: Destination (str) :raises: IOError: If unsuccessful
7,050
def _readClusterSettings(self):
    # several literals were lost in extraction; the header and the
    # 'compute'/'tags' keys follow the documented Azure instance-metadata
    # API, while the remaining gaps are left as ...
    mdUrl = "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
    header = {'Metadata': 'true'}
    request = urllib.request.Request(url=mdUrl, headers=header)
    response = urllib.request.urlopen(request)
    data = response.read()
    dataStr = data.decode("utf-8")
    metadata = json.loads(dataStr)
    self._zone = metadata['compute']['location']
    self.clusterName = metadata['compute'][...]
    tagsStr = metadata['compute']['tags']
    tags = dict(item.split(":") for item in tagsStr.split(";"))
    self._owner = tags.get('owner', ...)
    leader = self.getLeader()
    self._leaderPrivateIP = leader.privateIP
    self._setSSH()
    self._masterPublicKeyFile = self.LEADER_HOME_DIR + ...  # path suffix lost
    map(lambda x: self._addToHosts(x), self.getProvisionedWorkers(None))
Read the current instance's meta-data to get the cluster settings.
7,051
def get_top_paths(self):
    headers, data = self._requester.requestJsonAndCheck(
        "GET", self.url + "/traffic/popular/paths"
    )
    if isinstance(data, list):
        return [
            github.Path.Path(self._requester, headers, item, completed=True)
            for item in data
        ]
:calls: `GET /repos/:owner/:repo/traffic/popular/paths <https://developer.github.com/v3/repos/traffic/>`_ :rtype: :class:`list` of :class:`github.Path.Path`
7,052
def apply(self, s, active=None):
    if active is None:
        active = self.active
    return self.group.apply(s, active=active)
Apply the REPP's rewrite rules to the input string *s*. Args: s (str): the input string to process active (optional): a collection of external module names that may be applied if called Returns: a :class:`REPPResult` object containing the processed string and characterization maps
7,053
def merge_dict(self, *args, **kwargs):
    input_dict = self._convert_input(*args, **kwargs)
    if input_dict:
        self._sift_and_init_configs(input_dict)
        self.token_dict.merge_serialization(input_dict)
Takes variable inputs, compiles them into a dictionary then merges it to the current nomenclate's state :param args: (dict, Nomenclate), any number of dictionary inputs or Nomenclates to be converted to dicts :param kwargs: str, any number of kwargs that represent token:value pairs
7,054
def _run_train_step(self, data, mode='train'):
    # the 'train' literals were lost in extraction; restored from the
    # docstring ("'train' or 'test'")
    epoch_size = ((len(data) // self.batch_size) - 1) // self.num_steps
    costs = 0.0
    iters = 0
    step = 0
    state = self._init_state.eval()
    op = self._train_op if mode == 'train' else tf.no_op()
    for step, (x, y) in enumerate(
            utilities.seq_data_iterator(
                data, self.batch_size, self.num_steps)):
        cost, state, _ = self.tf_session.run(
            [self.cost, self.final_state, op],
            {self.input_data: x,
             self.input_labels: y,
             self._init_state: state})
        costs += cost
        iters += self.num_steps
        if step % (epoch_size // 10) == 10:
            print("%.3f perplexity" % (step * 1.0 / epoch_size))
    return np.exp(costs / iters)
Run a single training step. :param data: input data :param mode: 'train' or 'test'.
7,055
def partition_agent(host):
    network.save_iptables(host)
    network.flush_all_rules(host)
    network.allow_all_traffic(host)
    network.run_iptables(host, ALLOW_SSH)
    network.run_iptables(host, ALLOW_PING)
    network.run_iptables(host, DISALLOW_MESOS)
    network.run_iptables(host, DISALLOW_INPUT)
Partition a node from all network traffic except for SSH and loopback :param host: host or IP of the machine to partition from the cluster
7,056
def get_context_dict(self):
    context_dict = {}
    for s in self.sections():
        for k, v in self.manifest.items(s):
            context_dict["%s:%s" % (s, k)] = v
    for k, v in self.inputs.values().items():
        context_dict["config:{0}".format(k)] = v
    context_dict.update(self.additional_context_variables.items())
    context_dict.update(dict([("%s|escaped" % k, re.escape(str(v) or ""))
                              for k, v in context_dict.items()]))
    return context_dict
return a context dict of the desired state
7,057
def unset(self, host, *args):
    self.__check_host_args(host, args)
    remove_idx = [idx for idx, x in enumerate(self.lines_)
                  if x.host == host and x.key.lower() in args]
    for idx in reversed(sorted(remove_idx)):
        del self.lines_[idx]
Removes settings for a host. Parameters ---------- host : the host to remove settings from. *args : list of settings to remove.
7,058
def search_elementnames(self, *substrings: str,
                        name: str = 'elementnames') -> 'Selection':
    # the default name and the f-string message were partly lost in
    # extraction; both are restored from the docstring examples below
    try:
        selection = Selection(name)
        for element in self.elements:
            for substring in substrings:
                if substring in element.name:
                    selection.elements += element
                    break
        return selection
    except BaseException:
        values = objecttools.enumeration(substrings)
        objecttools.augment_excmessage(
            f'While trying to determine the elements of selection '
            f'`{self.name}` with names containing at least one of the '
            f'given substrings `{values}`')
Return a new selection containing all elements of the current selection with a name containing at least one of the given substrings. >>> from hydpy.core.examples import prepare_full_example_2 >>> hp, pub, _ = prepare_full_example_2() Pass the (sub)strings as positional arguments and, optionally, the name of the newly created |Selection| object as a keyword argument: >>> test = pub.selections.complete.copy('test') >>> from hydpy import prepare_model >>> test.search_elementnames('dill', 'lahn_1') Selection("elementnames", nodes=(), elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2", "stream_lahn_1_lahn_2")) Wrong string specifications result in errors like the following: >>> test.search_elementnames(['dill', 'lahn_1']) Traceback (most recent call last): ... TypeError: While trying to determine the elements of selection \ `test` with names containing at least one of the given substrings \ `['dill', 'lahn_1']`, the following error occurred: 'in <string>' \ requires string as left operand, not list Method |Selection.select_elementnames| restricts the current selection to the one determined with the method |Selection.search_elementnames|: >>> test.select_elementnames('dill', 'lahn_1') Selection("test", nodes=("dill", "lahn_1", "lahn_2", "lahn_3"), elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2", "stream_lahn_1_lahn_2")) On the contrary, the method |Selection.deselect_elementnames| restricts the current selection to all devices not determined by the method |Selection.search_elementnames|: >>> pub.selections.complete.deselect_elementnames('dill', 'lahn_1') Selection("complete", nodes=("dill", "lahn_1", "lahn_2", "lahn_3"), elements=("land_lahn_2", "land_lahn_3", "stream_lahn_2_lahn_3"))
7,059
def _create_index_content(words):
    content = ["<h1>Index</h1>", "<ul>"]
    for word in words:
        # the list-item template was lost in extraction; a link per word
        # is the likely shape
        content.append(
            '<li><a href="{word}.html">{word}</a></li>'.format(word=word)
        )
    content.append("</ul>")
    if not words:
        content.append("<i>Nothing to see here ...yet!</i>")
    return "\n".join(content)
Create html string of index file. Parameters ---------- words : list of str List of cached words. Returns ------- str html string.
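The empty-list branch can be exercised without knowing the elided per-word markup:

print(_create_index_content([]))
# <h1>Index</h1>
# <ul>
# </ul>
# <i>Nothing to see here ...yet!</i>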
7,060
def setOverlayAlpha(self, ulOverlayHandle, fAlpha):
    fn = self.function_table.setOverlayAlpha
    result = fn(ulOverlayHandle, fAlpha)
    return result
Sets the alpha of the overlay quad. Use 1.0 for 100 percent opacity to 0.0 for 0 percent opacity.
7,061
def get_info(self, full=False):
    " Return printable information about current site. "
    if full:
        context = self.as_dict()
        return "".join("{0:<25} = {1}\n".format(key, context[key])
                       for key in sorted(context.iterkeys()))
    return "%s [%s]" % (self.get_name(), self.template)
Return printable information about current site.
7,062
def ProcessPathSpec(self, mediator, path_spec):
    self.last_activity_timestamp = time.time()
    self.processing_status = definitions.STATUS_INDICATOR_RUNNING
    file_entry = path_spec_resolver.Resolver.OpenFileEntry(
        path_spec, resolver_context=mediator.resolver_context)
    if file_entry is None:
        display_name = mediator.GetDisplayNameForPathSpec(path_spec)
        # the warning template was lost in extraction; text is a guess
        logger.warning(
            'Unable to open file entry: {0:s}'.format(display_name))
        self.processing_status = definitions.STATUS_INDICATOR_IDLE
        return
    mediator.SetFileEntry(file_entry)
    try:
        if file_entry.IsDirectory():
            self._ProcessDirectory(mediator, file_entry)
        self._ProcessFileEntry(mediator, file_entry)
    finally:
        mediator.ResetFileEntry()
    self.last_activity_timestamp = time.time()
    self.processing_status = definitions.STATUS_INDICATOR_IDLE
Processes a path specification. Args: mediator (ParserMediator): mediates the interactions between parsers and other components, such as storage and abort signals. path_spec (dfvfs.PathSpec): path specification.
7,063
def b64_decode(data: bytes) -> bytes:
    missing_padding = len(data) % 4
    if missing_padding != 0:
        # pad character lost in extraction; '=' is the base64 pad
        data += b'=' * (4 - missing_padding)
    return urlsafe_b64decode(data)
:param data: Base 64 encoded data to decode. :type data: bytes :return: Base 64 decoded data. :rtype: bytes
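Usage sketch; 'aGVsbG8' is b'hello' base64-encoded with its '=' padding stripped:

from base64 import urlsafe_b64decode  # assumed import for the function above

print(b64_decode(b"aGVsbG8"))  # b'hello'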
7,064
def slicing_singlevalue(arg, length):
    if isinstance(arg, slice):
        start, stop, step = arg.indices(length)
        i = start
        if step > 0:
            while i < stop:
                yield i
                i += step
        else:
            while i > stop:
                yield i
                i += step
    else:
        try:
            i = arg.__index__()
        except AttributeError:
            raise TypeError("indices must be integers or slices, not " +
                            arg.__class__.__name__)
        if i < 0:
            i += length
        yield i
Internally used.
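Usage sketch, assuming the generator as defined above:

print(list(slicing_singlevalue(slice(None, None, -1), 4)))  # [3, 2, 1, 0]
print(list(slicing_singlevalue(-1, 4)))                     # [3]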
7,065
def _check_reach_env():
    # config keys, env-var names, regex patterns and log messages were
    # lost in extraction and are left as ...
    path_to_reach = get_config(...)
    if path_to_reach is None:
        path_to_reach = environ.get(..., None)
    if path_to_reach is None or not path.exists(path_to_reach):
        raise ReachError(...)
    logger.debug(... % path_to_reach)
    reach_version = get_config(...)
    if reach_version is None:
        reach_version = environ.get(..., None)
    if reach_version is None:
        logger.debug(...)
        m = re.match(..., path.basename(path_to_reach))
        reach_version = re.sub(..., ..., m.groups()[0])
    logger.debug(... % reach_version)
    return path_to_reach, reach_version
Check that the environment supports running REACH.
7,066
def volumes(self, assets, dt):
    # the field-name literals were lost in extraction; 'volume' is implied
    # by the method's purpose, and tz='UTC' is the usual choice here
    market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
    volumes = []
    session_label = self._trading_calendar.minute_to_session_label(dt)
    for asset in assets:
        if not asset.is_alive_for_session(session_label):
            volumes.append(0)
            continue
        if prev_dt is None:
            val = self._minute_reader.get_value(asset, dt, 'volume')
            entries[asset] = (dt_value, val)
            volumes.append(val)
            continue
        else:
            try:
                last_visited_dt, last_total = entries[asset]
                if last_visited_dt == dt_value:
                    volumes.append(last_total)
                    continue
                elif last_visited_dt == prev_dt:
                    val = self._minute_reader.get_value(asset, dt, 'volume')
                    val += last_total
                    entries[asset] = (dt_value, val)
                    volumes.append(val)
                    continue
                else:
                    after_last = pd.Timestamp(
                        last_visited_dt + self._one_min, tz='UTC')
                    window = self._minute_reader.load_raw_arrays(
                        ['volume'], after_last, dt, [asset],
                    )[0]
                    val = np.nansum(window) + last_total
                    entries[asset] = (dt_value, val)
                    volumes.append(val)
                    continue
            except KeyError:
                window = self._minute_reader.load_raw_arrays(
                    ['volume'], market_open, dt, [asset],
                )[0]
                val = np.nansum(window)
                entries[asset] = (dt_value, val)
                volumes.append(val)
                continue
    return np.array(volumes)
The volume field's aggregation returns the sum of all volumes between the market open and the `dt` If there has been no data on or before the `dt` the volume is 0. Returns ------- np.array with dtype=int64, in order of assets parameter.
7,067
def _post_init(self):
    try:
        return self.postinit()
    except Exception as exc:
        return self._onerror(Result.from_exception(exc, uuid=self.uuid))
A post init trigger
7,068
def is_active(self):
    if not self.multiplexer:
        return False
    if self._is_active:
        return True
    if self._thread_for_determining_is_active:
        return self._is_active
    new_thread = threading.Thread(
        target=self._determine_is_active,
        name=...)  # thread name lost in extraction
    self._thread_for_determining_is_active = new_thread
    new_thread.start()
    return False
Determines whether this plugin is active. This plugin is only active if any run has an embedding. Returns: Whether any run has embedding data to show in the projector.
7,069
def remove_product_version(self, id, product_version_id, **kwargs):
    # the kwargs key literals were lost in extraction; swagger-codegen
    # clients use '_return_http_data_only' and 'callback' in this pattern
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.remove_product_version_with_http_info(
            id, product_version_id, **kwargs)
    else:
        (data) = self.remove_product_version_with_http_info(
            id, product_version_id, **kwargs)
        return data
Removes a product version from the specified config set This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.remove_product_version(id, product_version_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Build configuration set id (required) :param int product_version_id: Product version id (required) :return: None If the method is called asynchronously, returns the request thread.
7,070
def add_dns(ip, interface='Local Area Connection', index=1):
    servers = get_dns_servers(interface)
    if servers is False:
        return False
    try:
        if servers[index - 1] == ip:
            return True
    except IndexError:
        pass
    if ip in servers:
        rm_dns(ip, interface)
    # the command literals were lost in extraction; this is the usual
    # netsh invocation for adding a DNS server at a given index
    cmd = ['netsh', 'interface', 'ip', 'add', 'dns',
           interface, ip, 'index={0}'.format(index), 'validate=no']
    return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
Add the DNS server to the network interface (index starts from 1) Note: if the interface DNS is configured by DHCP, all the DNS servers will be removed from the interface and the requested DNS will be the only one CLI Example: .. code-block:: bash salt '*' win_dns_client.add_dns <ip> <interface> <index>
7,071
async def load_credentials(self, credentials):
    # the separator and log message were lost in extraction; a colon
    # separator fits the identifier:key layout used below
    split = credentials.split(':')
    self.identifier = split[0]
    self.srp.initialize(binascii.unhexlify(split[1]))
    _LOGGER.debug('Loaded credentials: %s', credentials)
Load existing credentials.
7,072
def _serialize_normalized_array(array, fmt='png', quality=70):
    # the fmt default was lost in extraction; restored from the docstring
    dtype = array.dtype
    assert np.issubdtype(dtype, np.unsignedinteger)
    assert np.max(array) <= np.iinfo(dtype).max
    assert array.shape[-1] > 1
    image = PIL.Image.fromarray(array)
    image_bytes = BytesIO()
    image.save(image_bytes, fmt, quality=quality)
    image_data = image_bytes.getvalue()
    return image_data
Given a normalized array, returns byte representation of image encoding. Args: array: NumPy array of dtype uint8 and range 0 to 255 fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer
7,073
def start(self):
    # many dict-key literals in this method were lost in extraction; the
    # keys filled in below are inferred from the Docker swarm API and the
    # SwarmSpawner configuration pattern, and the rest are left as ...
    if self.use_user_options:
        user_options = self.user_options
    else:
        user_options = {}
    self.log.warn("user_options: {}".format(user_options))
    service = yield self.get_service()
    if service is None:
        if 'name' in user_options:
            self.server_name = user_options['name']
        if hasattr(self, 'container_spec') and self.container_spec is not None:
            container_spec = dict(**self.container_spec)
        elif user_options == {}:
            # original raised a bare string; wrapped in Exception here
            raise Exception("A container_spec is needed in to create a service")
        container_spec.update(user_options.get('container_spec', {}))
        container_spec['mounts'] = []
        for mount in self.container_spec['mounts']:
            m = dict(**mount)
            if 'source' in m:
                m['source'] = m['source'].format(username=self.service_owner)
            if 'driver_config' in m:
                device = m['driver_config']['options']['device'].format(
                    username=self.service_owner)
                m['driver_config']['options']['device'] = device
                m['driver_config'] = docker.types.DriverConfig(**m['driver_config'])
            container_spec['mounts'].append(docker.types.Mount(**m))
        container_spec['env'] = self.get_env()
        if hasattr(self, 'resource_spec'):
            resource_spec = self.resource_spec
            resource_spec.update(user_options.get('resource_spec', {}))
        if hasattr(self, 'networks'):
            networks = self.networks
            if user_options.get('networks') is not None:
                networks = user_options.get('networks')
        if hasattr(self, 'placement'):
            placement = self.placement
            if user_options.get('placement') is not None:
                placement = user_options.get('placement')
        image = container_spec[...]  # image key lost in extraction
        del container_spec[...]
        container_spec = docker.types.ContainerSpec(image, **container_spec)
        resources = docker.types.Resources(**resource_spec)
        task_spec = {'container_spec': container_spec,
                     'resources': resources,
                     'placement': placement}
        task_tmpl = docker.types.TaskTemplate(**task_spec)
        resp = yield self.docker('create_service', task_tmpl,
                                 name=self.service_name, networks=networks)
        self.service_id = resp['ID']
        self.log.info("Created Docker service (id: %s) from image %s",
                      self.service_name, self.service_id[:7], image)
    else:
        self.log.info("Found existing Docker service (id: %s)",
                      self.service_name, self.service_id[:7])
        envs = service['Spec']['TaskTemplate']['ContainerSpec']['Env']
        for line in envs:
            if line.startswith('JUPYTERHUB_API_TOKEN'):
                self.api_token = line.split('=', 1)[1]
                break
    ip = self.service_name
    port = self.service_port
    return (ip, port)
Start the single-user server in a docker service. You can specify the params for the service through jupyterhub_config.py or using the user_options
7,074
def traceroute(target, dport=80, minttl=1, maxttl=30, sport=RandShort(),
               l4=None, filter=None, timeout=2, verbose=None, **kargs):
    if verbose is None:
        verbose = conf.verb
    if filter is None:
        filter = "(icmp and (icmp[0]=3 or icmp[0]=4 or icmp[0]=5 or icmp[0]=11 or icmp[0]=12)) or (tcp and (tcp[13] & 0x16 > 0x10))"
    if l4 is None:
        a, b = sr(IP(dst=target, id=RandShort(), ttl=(minttl, maxttl)) /
                  TCP(seq=RandInt(), sport=sport, dport=dport),
                  timeout=timeout, filter=filter, verbose=verbose, **kargs)
    else:
        filter = "ip"
        a, b = sr(IP(dst=target, id=RandShort(), ttl=(minttl, maxttl)) / l4,
                  timeout=timeout, filter=filter, verbose=verbose, **kargs)
    a = TracerouteResult(a.res)
    if verbose:
        a.show()
    return a, b
Instant TCP traceroute traceroute(target, [maxttl=30,] [dport=80,] [sport=80,] [verbose=conf.verb]) -> None # noqa: E501
7,075
def get_weekly_charts(self, chart_kind, from_date=None, to_date=None):
    method = ".getWeekly" + chart_kind.title() + "Chart"
    chart_type = eval(chart_kind.title())  # string to type
    params = self._get_params()
    if from_date and to_date:
        params["from"] = from_date
        params["to"] = to_date
    doc = self._request(self.ws_prefix + method, True, params)
    seq = []
    for node in doc.getElementsByTagName(chart_kind.lower()):
        if chart_kind == "artist":
            item = chart_type(_extract(node, "name"), self.network)
        else:
            item = chart_type(
                _extract(node, "artist"), _extract(node, "name"), self.network
            )
        weight = _number(_extract(node, "playcount"))
        seq.append(TopItem(item, weight))
    return seq
Returns the weekly charts for the week starting from the from_date value to the to_date value. chart_kind should be one of "album", "artist" or "track"
7,076
def start_rpc_listeners(self):
    self._setup_rpc()
    if not self.endpoints:
        return []
    self.conn = n_rpc.create_connection()
    self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
    return self.conn.consume_in_threads()
Configure all listeners here
7,077
def date_of_birth(self, value):
    if value:
        self._date_of_birth = (parse(value).date()
                               if isinstance(value, type_check) else value)
The date of birth of the individual. :param value: :return:
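The setter presumably relies on dateutil's parser; a minimal sketch of the conversion it performs:

from dateutil.parser import parse

print(parse("1990-05-17").date())  # datetime.date(1990, 5, 17)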
7,078
def load_config(self, path, environments, fill_with_defaults=False):
    # several literals were lost in extraction; the resolver tags, the
    # '/' and '.yaml' suffixes and the 'defaults' key are restored from
    # surviving code and the docstring below
    yaml.add_implicit_resolver("!environ", self.__environ_pattern)
    yaml.add_constructor('!environ', self.__get_from_environment)
    yaml.add_implicit_resolver("!vault", self.__vault_pattern)
    yaml.add_constructor('!vault', self.__get_from_vault)
    if not path.endswith('/'):
        path += '/'
    if type(environments) != list:
        environments = [environments]
    config = {}
    try:
        for env in environments:
            with open(path + env + '.yaml', 'r') as configFile:
                env_config = yaml.load(configFile.read()) or {}
            config.update(env_config)
        if fill_with_defaults:
            if 'defaults' in config:
                defaults = config['defaults']
                for target in defaults:
                    for index, item in enumerate(config[target]):
                        tmp = defaults[target].copy()
                        tmp.update(config[target][index])
                        config[target][index] = tmp
        return config
    except exceptions.VaultError as error:
        raise ConfigLoaderError("Could not read vault secrets [" + error.__class__.__name__ + "]")
    except yaml.YAMLError as error:
        raise ConfigLoaderError("Configuration files malformed [" + error.__class__.__name__ + "]")
    except json.decoder.JSONDecodeError as error:
        raise ConfigLoaderError("Vault response was not json [" + error.__class__.__name__ + "]")
    except Exception as error:
        raise ConfigLoaderError("WTF? [" + error.__class__.__name__ + "]")
Will load default.yaml and <environment>.yaml at given path.
The environment config will override the default values.

:param path: directory where to find your config files. If the last
    character is not a slash (/) it will be appended. Example: resources/
:param environments: list of environment configs to load. File name
    pattern: <environment>.yaml. Example: develop.yaml. Latter configs
    will override previous ones.
:param fill_with_defaults: use 'defaults' keyword in config file to fill
    up following config entries.
:return: your config as dictionary.
7,079
def sufar4(ascfile, meas_output='measurements.txt',
           aniso_output='rmag_anisotropy.txt', spec_infile=None,
           spec_outfile='specimens.txt', samp_outfile='samples.txt',
           site_outfile='sites.txt', specnum=0, sample_naming_con='1',
           user="", locname="unknown", instrument='',
           static_15_position_mode=False, dir_path='.', input_dir_path='',
           data_model_num=3):
    # default argument values are restored from the docstring below; the
    # many MagIC column-name literals and format strings were lost in
    # extraction and are left as ... unless a surviving literal or the
    # docstring pins them down
    citation = 'This study'
    cont = 0
    Z = 1
    AniRecSs, AniRecs, SpecRecs, SampRecs, SiteRecs, MeasRecs = [], [], [], [], [], []
    isspec = ...  # string flag; original value lost, compared to "0"/"1" below
    spin = 0
    data_model_num = int(float(data_model_num))
    if data_model_num == 2:
        # MagIC 2 filename overrides; literals lost, standard names assumed
        if meas_output == 'measurements.txt':
            meas_output = 'magic_measurements.txt'
        if spec_outfile == 'specimens.txt':
            spec_outfile = 'er_specimens.txt'
        if samp_outfile == 'samples.txt':
            samp_outfile = 'er_samples.txt'
        if site_outfile == 'sites.txt':
            site_outfile = 'er_sites.txt'
    # MagIC 3 column names (all string values lost in extraction)
    spec_name_col = samp_name_col = site_name_col = loc_name_col = ...
    citation_col = method_col = site_description_col = expedition_col = ...
    instrument_col = experiment_col = analyst_col = quality_col = ...
    aniso_quality_col = meas_standard_col = meas_description_col = ...
    aniso_type_col = aniso_unit_col = aniso_n_col = azimuth_col = ...
    spec_volume_col = samp_dip_col = bed_dip_col = bed_dip_direction_col = ...
    chi_vol_col = aniso_sigma_col = aniso_tilt_corr_col = ...
    meas_table_name = spec_table_name = samp_table_name = site_table_name = ...
    if data_model_num == 2:
        # MagIC 2 column names (all string values lost in extraction)
        spec_name_col = samp_name_col = site_name_col = loc_name_col = ...
        citation_col = method_col = site_description_col = expedition_col = ...
        instrument_col = experiment_col = analyst_col = quality_col = ...
        aniso_quality_col = meas_standard_col = meas_description_col = ...
        aniso_type_col = aniso_unit_col = aniso_n_col = azimuth_col = ...
        spec_volume_col = samp_dip_col = bed_dip_col = bed_dip_direction_col = ...
        chi_vol_col = aniso_sigma_col = aniso_tilt_corr_col = ...
        meas_table_name = spec_table_name = samp_table_name = site_table_name = ...
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
    ascfile = os.path.join(input_dir_path, ascfile)
    aniso_output = os.path.join(output_dir_path, aniso_output)
    meas_output = os.path.join(output_dir_path, meas_output)
    spec_outfile = os.path.join(output_dir_path, spec_outfile)
    samp_outfile = os.path.join(output_dir_path, samp_outfile)
    site_outfile = os.path.join(output_dir_path, site_outfile)
    if "4" in sample_naming_con:
        if "-" not in sample_naming_con:
            print("option [4] must be in form 4-Z where Z is an integer")
            return False, "option [4] must be in form 4-Z where Z is an integer"
        else:
            Z = sample_naming_con.split("-")[1]
            sample_naming_con = "4"
    if "7" in sample_naming_con:
        if "-" not in sample_naming_con:
            print("option [7] must be in form 7-Z where Z is an integer")
            return False, "option [7] must be in form 7-Z where Z is an integer"
        else:
            Z = sample_naming_con.split("-")[1]
            sample_naming_con = "7"
    if static_15_position_mode:
        spin = 0
    if spec_infile:
        if os.path.isfile(os.path.join(input_dir_path, str(spec_infile))):
            isspec = '1'  # flag value restored from the comparisons below
    specnum = int(specnum)
    if isspec == "1":
        specs, file_type = pmag.magic_read(spec_infile)
    specnames, sampnames, sitenames = [], [], []
    try:
        file_input = open(ascfile, 'r')
    except:
        print('Error opening ', ascfile)  # message lost in extraction
        return False, 'Error opening {}'.format(ascfile)
    Data = file_input.readlines()
    file_input.close()
    k = 0
    while k < len(Data):
        line = Data[k]
        words = line.split()
        if "ANISOTROPY" in words:
            MeasRec, AniRec, SpecRec, SampRec, SiteRec = {}, {}, {}, {}, {}
            specname = words[0]
            AniRec[spec_name_col] = specname
            if isspec == "1":
                for spec in specs:
                    if spec[spec_name_col] == specname:
                        AniRec[samp_name_col] = spec[samp_name_col]
                        AniRec[site_name_col] = spec[site_name_col]
                        AniRec[loc_name_col] = spec[loc_name_col]
                        break
            elif isspec == "0":
                if specnum != 0:
                    sampname = specname[:-specnum]
                else:
                    sampname = specname
                AniRec[samp_name_col] = sampname
                SpecRec[spec_name_col] = specname
                SpecRec[samp_name_col] = sampname
                SampRec[samp_name_col] = sampname
                SiteRec[samp_name_col] = sampname
                SiteRec[site_description_col] = ...
                if sample_naming_con != "9":
                    AniRec[site_name_col] = pmag.parse_site(
                        AniRec[samp_name_col], sample_naming_con, Z)
                    SpecRec[site_name_col] = pmag.parse_site(
                        AniRec[samp_name_col], sample_naming_con, Z)
                    SampRec[site_name_col] = pmag.parse_site(
                        AniRec[samp_name_col], sample_naming_con, Z)
                    SiteRec[site_name_col] = pmag.parse_site(
                        AniRec[samp_name_col], sample_naming_con, Z)
                else:
                    AniRec[site_name_col] = specname
                    SpecRec[site_name_col] = specname
                    SampRec[site_name_col] = specname
                    SiteRec[site_name_col] = specname
                    pieces = specname.split()  # separator possibly lost in extraction
                    AniRec[expedition_col] = pieces[0]
                    SpecRec[expedition_col] = pieces[0]
                    SampRec[expedition_col] = pieces[0]
                    SiteRec[expedition_col] = pieces[0]
                    location = pieces[1]
            AniRec[loc_name_col] = locname
            SpecRec[loc_name_col] = locname
            SampRec[loc_name_col] = locname
            SiteRec[loc_name_col] = locname
            AniRec[citation_col] = "This study"
            SpecRec[citation_col] = "This study"
            SampRec[citation_col] = "This study"
            SiteRec[citation_col] = "This study"
            AniRec[citation_col] = "This study"
            AniRec[instrument_col] = instrument
            AniRec[method_col] = "LP-X:AE-H:LP-AN-MS"
            AniRec[experiment_col] = specname + ":" + "LP-AN-MS"
            AniRec[analyst_col] = user
            for key in list(AniRec.keys()):
                MeasRec[key] = AniRec[key]
            # per-model key renames; the keys involved were lost in extraction
            if data_model_num == 2:
                MeasRec[...] = AniRec.get(..., ...)
                if ... in MeasRec:
                    MeasRec.pop(...)
            if data_model_num == 3:
                MeasRec[...] = AniRec.get(..., ...)
                if ... in MeasRec:
                    MeasRec.pop(...)
            MeasRec[quality_col] = ...
            AniRec[aniso_quality_col] = ...
            MeasRec[meas_standard_col] = ...
            MeasRec[meas_description_col] = ...
            AniRec[aniso_type_col] = "AMS"
            AniRec[aniso_unit_col] = "Normalized by trace"
            if spin == 1:
                AniRec[aniso_n_col] = "192"
            else:
                AniRec[aniso_n_col] = "15"
        if ... in words and isspec == ...:
            az = float(words[1])
            P1 = float(words[4])
            P2 = float(words[5])
            P3 = float(words[6])
            P4 = float(words[7])
            az = az + P1 * 360. / 12. - P3 * 360. / 12.
            if az >= 360:
                az = az - 360
            elif az <= -360:
                az = az + 360
            labaz = az
            SampRec[azimuth_col] = str(round(az, 1))
        if ... in words:
            SpecRec[spec_volume_col] = ... % (float(words[10]) * 1e-6)
            dip = float(words[1])
            if P2 == 90:
                dip = dip - 90.
            labdip = dip
            SampRec[samp_dip_col] = str(round(dip, 1))
        if ... in words and ... in words:
            k += 2
            line = Data[k]
            rec = line.split()
            dd = rec[1].split()
            dip_direction = int(dd[0]) + 90
            SampRec[bed_dip_direction_col] = ... % (dip_direction)
            SampRec[bed_dip_col] = dd[1]
            bed_dip = float(dd[1])
        if "Mean" in words:
            k += 4
            line = Data[k]
            rec = line.split()
            MeasRec[chi_vol_col] = rec[1]
            sigma = .01 * float(rec[2]) / 3.
            AniRec[aniso_sigma_col] = ... % (sigma)
            AniRec[aniso_unit_col] = ...
        if "factors" in words:
            k += 4
            line = Data[k]
            rec = line.split()
        if "Specimen" in words:
            s1_val = ... % (float(words[5]) / 3.)
            s2_val = ... % (float(words[6]) / 3.)
            s3_val = ... % (float(words[7]) / 3.)
            k += 1
            line = Data[k]
            rec = line.split()
            s4_val = ... % (float(rec[5]) / 3.)
            s5_val = ... % (float(rec[6]) / 3.)
            s6_val = ... % (float(rec[7]) / 3.)
            # the 'anisotropy_s*' / 'aniso_s' keys are restored from the
            # explicit uses further down
            if data_model_num == 2:
                AniRec["anisotropy_s1"] = s1_val
                AniRec["anisotropy_s2"] = s2_val
                AniRec["anisotropy_s3"] = s3_val
                AniRec["anisotropy_s4"] = s4_val
                AniRec["anisotropy_s5"] = s5_val
                AniRec["anisotropy_s6"] = s6_val
            else:
                vals = (s1_val, s2_val, s3_val, s4_val, s5_val, s6_val)
                AniRec["aniso_s"] = ":".join([v.strip() for v in vals])
            AniRec[aniso_tilt_corr_col] = ...
            AniRecs.append(AniRec)
            AniRecG, AniRecT = {}, {}
            for key in list(AniRec.keys()):
                AniRecG[key] = AniRec[key]
            for key in list(AniRec.keys()):
                AniRecT[key] = AniRec[key]
            sbar = []
            sbar.append(float(s1_val))
            sbar.append(float(s2_val))
            sbar.append(float(s3_val))
            sbar.append(float(s4_val))
            sbar.append(float(s5_val))
            sbar.append(float(s6_val))
            sbarg = pmag.dosgeo(sbar, labaz, labdip)
            s1_g = ... % (sbarg[0])
            s2_g = ... % (sbarg[1])
            s3_g = ... % (sbarg[2])
            s4_g = ... % (sbarg[3])
            s5_g = ... % (sbarg[4])
            s6_g = ... % (sbarg[5])
            if data_model_num == 2:
                AniRecG["anisotropy_s1"] = s1_g
                AniRecG["anisotropy_s2"] = s2_g
                AniRecG["anisotropy_s3"] = s3_g
                AniRecG["anisotropy_s4"] = s4_g
                AniRecG["anisotropy_s5"] = s5_g
                AniRecG["anisotropy_s6"] = s6_g
            else:
                vals = (s1_g, s2_g, s3_g, s4_g, s5_g, s6_g)
                AniRecG["aniso_s"] = ":".join([v.strip() for v in vals])
            AniRecG[aniso_tilt_corr_col] = ...
            AniRecs.append(AniRecG)
            if bed_dip != "" and bed_dip != 0:
                sbart = pmag.dostilt(sbarg, dip_direction, bed_dip)
                s1_t = ... % (sbart[0])
                s2_t = ... % (sbart[1])
                s3_t = ... % (sbart[2])
                s4_t = ... % (sbart[3])
                s5_t = ... % (sbart[4])
                s6_t = ... % (sbart[5])
                if data_model_num == 2:
                    AniRecT["anisotropy_s1"] = s1_t
                    AniRecT["anisotropy_s2"] = s2_t
                    AniRecT["anisotropy_s3"] = s3_t
                    AniRecT["anisotropy_s4"] = s4_t
                    AniRecT["anisotropy_s5"] = s5_t
                    AniRecT["anisotropy_s6"] = s6_t
                else:
                    vals = (s1_t, s2_t, s3_t, s4_t, s5_t, s6_t)
                    AniRecT["aniso_s"] = ":".join([v.strip() for v in vals])
                AniRecT[aniso_tilt_corr_col] = ...
                AniRecs.append(AniRecT)
            MeasRecs.append(MeasRec)
            if SpecRec[spec_name_col] not in specnames:
                SpecRecs.append(SpecRec)
                specnames.append(SpecRec[spec_name_col])
            if SampRec[samp_name_col] not in sampnames:
                SampRecs.append(SampRec)
                sampnames.append(SampRec[samp_name_col])
            if SiteRec[site_name_col] not in sitenames:
                SiteRecs.append(SiteRec)
                sitenames.append(SiteRec[site_name_col])
        k += 1
    pmag.magic_write(meas_output, MeasRecs, meas_table_name)
    print("bulk measurements put in ", meas_output)
    SpecOut, keys = pmag.fillkeys(SpecRecs)
    if data_model_num == 2:
        pmag.magic_write(aniso_output, AniRecs, ...)
        print("anisotropy tensors put in ", aniso_output)
    if data_model_num == 3:
        full_SpecOut = []
        spec_list = []
        for rec in SpecOut:
            full_SpecOut.append(rec)
            spec_name = rec[spec_name_col]
            if spec_name not in spec_list:
                spec_list.append(spec_name)
                ani_recs = pmag.get_dictitem(AniRecs, spec_name_col, spec_name, ...)
                full_SpecOut.extend(ani_recs)
        full_SpecOut, keys = pmag.fillkeys(full_SpecOut)
    else:
        full_SpecOut = SpecOut
    pmag.magic_write(spec_outfile, full_SpecOut, spec_table_name)
    print("specimen/anisotropy info put in ", spec_outfile)
    SampOut, keys = pmag.fillkeys(SampRecs)
    pmag.magic_write(samp_outfile, SampOut, samp_table_name)
    print("sample info put in ", samp_outfile)
    SiteOut, keys = pmag.fillkeys(SiteRecs)
    pmag.magic_write(site_outfile, SiteOut, site_table_name)
    print("site info put in ", site_outfile)
    return True, meas_output
Converts ascii files generated by SUFAR ver.4.0 to MagIC files

Parameters
----------
ascfile : str
    input ASC file, required
meas_output : str
    measurement output filename, default "measurements.txt"
aniso_output : str
    anisotropy output filename, MagIC 2 only, "rmag_anisotropy.txt"
spec_infile : str
    specimen infile, default None
spec_outfile : str
    specimen outfile, default "specimens.txt"
samp_outfile : str
    sample outfile, default "samples.txt"
site_outfile : str
    site outfile, default "sites.txt"
specnum : int
    number of characters to designate a specimen, default 0
sample_naming_con : str
    sample/site naming convention, default '1', see info below
user : str
    user name, default ""
locname : str
    location name, default "unknown"
instrument : str
    instrument name, default ""
static_15_position_mode : bool
    specify static 15 position mode, default False (is spinning)
dir_path : str
    output directory, default "."
input_dir_path : str
    input file directory IF different from dir_path, default ""
data_model_num : int
    MagIC data model 2 or 3, default 3

Returns
--------
type - Tuple : (True or False indicating if conversion was successful, file name written)

Info
--------
Sample naming convention:
    [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default]
    [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitrary length)
    [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitrary length)
    [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
    [5] site name = sample name
    [6] site name entered in site_name column in the orient.txt format input file -- NOT CURRENTLY SUPPORTED
    [7-Z] [XXX]YYY: XXX is site designation with Z characters from samples XXXYYY
7,080
def diffuse(self, *args):
    mode = diffusingModeEnum.unknown
    if (isinstance(args[0], str) and (len(args) == 3)):
        mode = diffusingModeEnum.element
    elif (hasattr(args[0], "__len__") and (len(args) == 2)):
        mode = diffusingModeEnum.elements
    else:
        raise TypeError(
            "Called diffuse method using bad arguments, received"
            " {}, but expected (str, value, value) or"
            " (iterable, values).".format(args))
    self._diffuse(mode, *args)
Dispatcher for the diffuse implementation; the mode is selected depending on the arguments used.
7,081
def post_request(profile, resource, payload): url = get_url(profile, resource) headers = get_headers(profile) response = requests.post(url, json=payload, headers=headers) return response.json()
Do a POST request to Github's API. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. resource The part of a Github API URL that comes after ``.../:repo/git``. For instance, for ``.../:repo/git/commits``, it's ``/commits``. payload A dict of values to send as the payload of the POST request. The data will be JSON-encoded. Returns: The body of the response, converted from JSON into a Python dict.
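A brief usage sketch. The `/blobs` resource and payload shape follow GitHub's git-data API for creating a blob; the repo and token values are placeholders.

profile = {'repo': 'octocat/example', 'token': '<personal access token>'}
# create a blob in the repo's git database (POST .../git/blobs)
body = post_request(profile, '/blobs',
                    {'content': 'hello world', 'encoding': 'utf-8'})
print(body.get('sha'))  # sha of the newly created blob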
7,082
def QA_indicator_RSI(DataFrame, N1=12, N2=26, N3=9):
    CLOSE = DataFrame['close']
    LC = REF(CLOSE, 1)
    RSI1 = SMA(MAX(CLOSE - LC, 0), N1) / SMA(ABS(CLOSE - LC), N1) * 100
    RSI2 = SMA(MAX(CLOSE - LC, 0), N2) / SMA(ABS(CLOSE - LC), N2) * 100
    RSI3 = SMA(MAX(CLOSE - LC, 0), N3) / SMA(ABS(CLOSE - LC), N3) * 100
    DICT = {'RSI1': RSI1, 'RSI2': RSI2, 'RSI3': RSI3}
    return pd.DataFrame(DICT)
Relative Strength Index. RSI1: SMA(MAX(CLOSE-LC,0),N1,1)/SMA(ABS(CLOSE-LC),N1,1)*100;
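A self-contained sketch of the same computation in plain pandas. The helper names REF/SMA/MAX/ABS above come from QUANTAXIS's indicator DSL; SMA(X, N, 1) there is an exponential average with alpha = 1/N, which `ewm` reproduces below.

import pandas as pd

def rsi(close: pd.Series, n: int = 12) -> pd.Series:
    diff = close.diff()
    up = diff.clip(lower=0)
    down = diff.abs()
    # SMA(X, N, 1) in the DSL is an EMA with alpha = 1/N
    return up.ewm(alpha=1 / n, adjust=False).mean() / \
        down.ewm(alpha=1 / n, adjust=False).mean() * 100

prices = pd.Series([10.0, 10.2, 10.1, 10.4, 10.3, 10.6])
print(rsi(prices, n=3).round(2))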
7,083
def fdrcorrection(pvals, alpha=0.05): pvals = np.asarray(pvals) pvals_sortind = np.argsort(pvals) pvals_sorted = np.take(pvals, pvals_sortind) ecdffactor = _ecdf(pvals_sorted) reject = pvals_sorted <= ecdffactor*alpha if reject.any(): rejectmax = max(np.nonzero(reject)[0]) reject[:rejectmax] = True pvals_corrected_raw = pvals_sorted / ecdffactor pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1] del pvals_corrected_raw pvals_corrected[pvals_corrected>1] = 1 pvals_corrected_ = np.empty_like(pvals_corrected) pvals_corrected_[pvals_sortind] = pvals_corrected del pvals_corrected reject_ = np.empty_like(reject) reject_[pvals_sortind] = reject return reject_, pvals_corrected_
Benjamini-Hochberg FDR correction, inspired by statsmodels.
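A quick usage sketch with made-up p-values:

import numpy as np

pvals = [0.001, 0.008, 0.039, 0.041, 0.042, 0.06, 0.074, 0.205]
reject, pvals_corrected = fdrcorrection(pvals, alpha=0.05)
print(reject)                          # hypotheses surviving the 5% FDR
print(np.round(pvals_corrected, 3))    # BH-adjusted p-values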
7,084
def save(self, *args, **kwargs):
    auto_update = kwargs.get('auto_update', True)
    if auto_update:
        self.updated = now()
    # strip the custom kwarg before delegating to Django's save()
    if 'auto_update' in kwargs:
        kwargs.pop('auto_update')
    super(BaseDate, self).save(*args, **kwargs)
Automatically update the ``updated`` date field unless ``auto_update=False`` is passed.
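Usage sketch, assuming a hypothetical concrete model `Report` inheriting from BaseDate:

report = Report.objects.get(pk=1)   # Report: hypothetical BaseDate subclass
report.save()                       # refreshes report.updated to now()
report.save(auto_update=False)      # keeps the stored timestamp untouched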
7,085
def sys_open(self, buf, flags, mode): filename = self.current.read_string(buf) try: f = self._sys_open_get_file(filename, flags) logger.debug(f"Opening file {filename} for real fd {f.fileno()}") except IOError as e: logger.warning(f"Could not open file {filename}. Reason: {e!s}") return -e.errno if e.errno is not None else -errno.EINVAL return self._open(f)
:param buf: address of zero-terminated pathname :param flags: file access bits :param mode: file permission mode
7,086
def prepare_mainsubstituter():
    substituter = Substituter()
    for module in (builtins, numpy, datetime, unittest, doctest, inspect,
                   io, os, sys, time, collections, itertools, subprocess,
                   scipy, typing):
        substituter.add_module(module)
    for subpackage in (auxs, core, cythons, exe):
        for dummy, name, dummy in pkgutil.walk_packages(subpackage.__path__):
            full_name = subpackage.__name__ + '.' + name
            substituter.add_module(importlib.import_module(full_name))
    substituter.add_modules(models)
    for cymodule in (annutils, smoothutils, pointerutils):
        substituter.add_module(cymodule, cython=True)
    substituter._short2long[] =
    substituter._short2long[] =
    return substituter
Prepare and return a |Substituter| object for the main `__init__` file of *HydPy*.
7,087
def _make_multidim_func(one_d_func, n, *args): _args = list(args) n = np.atleast_1d(n) args = list(map(np.atleast_1d, _args)) if all([x.size == 1 for x in [n] + args]): return one_d_func(n[0], *_args) d = n.size for i in range(len(args)): if args[i].size == 1: args[i] = np.repeat(args[i], d) nodes = [] weights = [] for i in range(d): ai = [x[i] for x in args] _1d = one_d_func(n[i], *ai) nodes.append(_1d[0]) weights.append(_1d[1]) weights = ckron(*weights[::-1]) nodes = gridmake(*nodes) return nodes, weights
A helper function to cut down on code repetition. Almost all of the code in qnwcheb, qnwlege, qnwsimp, qnwtrap is just dealing various forms of input arguments and then shelling out to the corresponding 1d version of the function. This routine does all the argument checking and passes things through the appropriate 1d function before using a tensor product to combine weights and nodes. Parameters ---------- one_d_func : function The 1d function to be called along each dimension n : int or array_like(float) A length-d iterable of the number of nodes in each dimension args : These are the arguments to various qnw____ functions. For the majority of the functions this is just a and b, but some differ. Returns ------- func : function The multi-dimensional version of the parameter ``one_d_func``
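A sketch of how the helper combines 1d rules into a tensor-product rule, with a toy trapezoid rule standing in for the library's qnw functions (ckron and gridmake come from the same module as `_make_multidim_func`):

import numpy as np

def qnwtrap1(n, a, b):
    # simple 1d trapezoid nodes/weights on [a, b]
    nodes = np.linspace(a, b, n)
    h = (b - a) / (n - 1)
    weights = np.full(n, h)
    weights[[0, -1]] = h / 2
    return nodes, weights

# 2d tensor-product rule on [0, 1] x [0, 2]
nodes, weights = _make_multidim_func(qnwtrap1, [5, 7], [0, 0], [1, 2])
print(weights @ np.ones(len(nodes)))  # ~ area of the domain = 2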
7,088
def get_comments(self): return github.PaginatedList.PaginatedList( github.GistComment.GistComment, self._requester, self.url + "/comments", None )
:calls: `GET /gists/:gist_id/comments <http://developer.github.com/v3/gists/comments>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.GistComment.GistComment`
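Typical PyGithub usage; the token and gist id are placeholders:

from github import Github

g = Github('<token>')
gist = g.get_gist('aa5a315d61ae9438b18d')
for comment in gist.get_comments():
    print(comment.user.login, ':', comment.body)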
7,089
def java_install(self):
    # NOTE: the original command strings were lost; the shell commands
    # below are assumed reconstructions, not the source text
    sudo('yum install -y java-1.8.0-openjdk-devel')  # assumed installer
    java_home = run(
        'dirname $(dirname $(readlink -f $(which java)))'  # assumed lookup
    )
    append(bigdata_conf.global_env_home,
           'export JAVA_HOME={0}'.format(java_home),
           use_sudo=True)
    run('source {0}'.format(bigdata_conf.global_env_home))
install java on the remote host and register JAVA_HOME in the global environment file
:return:
7,090
def _bind_sources_to_destination(self):
    seen = set()
    for src in self._spec.sources:
        for src_ase in src.files(
                self._creds, self._spec.options,
                self._general_options.dry_run):
            # generate copy destinations for the source entity
            dest = [
                dst_ase for dst_ase in
                self._generate_destination_for_source(src_ase)
            ]
            if len(dest) == 0:
                continue
            primary_dst = dest[0]
            uid = blobxfer.operations.synccopy.SyncCopy.create_deletion_id(
                primary_dst._client, primary_dst.container, primary_dst.name)
            if uid in seen:
                raise RuntimeError(
                    'duplicate destination entity detected: {}/{}'.format(
                        primary_dst._client.primary_endpoint,
                        primary_dst.path))
            seen.add(uid)
            if self._spec.options.delete_extraneous_destination:
                self._delete_exclude.add(uid)
            if len(dest[1:]) > 0:
                if primary_dst.replica_targets is None:
                    primary_dst.replica_targets = []
                primary_dst.replica_targets.extend(dest[1:])
                # check replica targets for duplicates as well
                for rt in primary_dst.replica_targets:
                    ruid = (
                        blobxfer.operations.synccopy.SyncCopy.
                        create_deletion_id(rt._client, rt.container, rt.name)
                    )
                    if ruid in seen:
                        raise RuntimeError(
                            ('duplicate destination entity detected: '
                             '{}/{}').format(
                                 rt._client.primary_endpoint, rt.path))
                    seen.add(ruid)
                    if self._spec.options.delete_extraneous_destination:
                        self._delete_exclude.add(ruid)
            yield src_ase, primary_dst
Bind source storage entity to destination storage entities :param SyncCopy self: this :rtype: tuple :return: (source storage entity, destination storage entity)
7,091
def _reuse_pre_installed_setuptools(env, installer): if not env.setuptools_version: return reuse_old = config.reuse_old_setuptools reuse_best = config.reuse_best_setuptools reuse_future = config.reuse_future_setuptools reuse_comment = None if reuse_old or reuse_best or reuse_future: pv_old = parse_version(env.setuptools_version) pv_new = parse_version(installer.setuptools_version()) if pv_old < pv_new: if reuse_old: reuse_comment = "%s+ recommended" % ( installer.setuptools_version(),) elif pv_old > pv_new: if reuse_future: reuse_comment = "%s+ required" % ( installer.setuptools_version(),) elif reuse_best: reuse_comment = "" if reuse_comment is None: return if reuse_comment: reuse_comment = " (%s)" % (reuse_comment,) print("Reusing pre-installed setuptools %s distribution%s." % ( env.setuptools_version, reuse_comment)) return True
Return whether a pre-installed setuptools distribution should be reused.
7,092
def on_attribute(self, name):
    def decorator(fn):
        if isinstance(name, list):
            for n in name:
                self.add_attribute_listener(n, fn)
        else:
            self.add_attribute_listener(name, fn)
        return fn  # keep the decorated function bound to its name
    return decorator
Decorator for attribute listeners. The decorated function (``observer``) is invoked differently depending on the *type of attribute*. Attributes that represent sensor values or which are used to monitor connection status are updated whenever a message is received from the vehicle. Attributes which reflect vehicle "state" are only updated when their values change (for example :py:func:`Vehicle.system_status`, :py:attr:`Vehicle.armed`, and :py:attr:`Vehicle.mode`). The argument list for the callback is ``observer(object, attr_name, attribute_value)`` * ``self`` - the associated :py:class:`Vehicle`. This may be compared to a global vehicle handle to implement vehicle-specific callback handling (if needed). * ``attr_name`` - the attribute name. This can be used to infer which attribute has triggered if the same callback is used for watching several attributes. * ``msg`` - the attribute value (so you don't need to re-query the vehicle object). .. note:: There is no way to remove an attribute listener added with this decorator. Use :py:func:`add_attribute_listener` if you need to be able to remove the :py:func:`attribute listener <remove_attribute_listener>`. The code fragment below shows how you can create a listener for the attitude attribute. .. code:: python @vehicle.on_attribute('attitude') def attitude_listener(self, name, msg): print '%s attribute is: %s' % (name, msg) See :ref:`vehicle_state_observe_attributes` for more information. :param String name: The name of the attribute to watch (or '*' to watch all attributes). :param observer: The callback to invoke when a change in the attribute is detected.
7,093
def uridecode(uristring, encoding='utf-8', errors='strict'):
    if not isinstance(uristring, bytes):
        uristring = uristring.encode(encoding or 'ascii', errors)
    parts = uristring.split(b'%')
    result = [parts[0]]
    append = result.append
    decode = _decoded.get
    for s in parts[1:]:
        # decode the two hex digits after '%'; keep the escape verbatim
        # if it is not a valid percent-encoding
        append(decode(s[:2], b'%' + s[:2]))
        append(s[2:])
    if encoding is not None:
        return b''.join(result).decode(encoding, errors)
    else:
        return b''.join(result)
Decode a URI string or string component.
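For example:

print(uridecode('caf%C3%A9'))              # -> 'café'
print(uridecode(b'a%2Fb', encoding=None))  # -> b'a/b' (raw bytes result)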
7,094
def to_bytes(self): out = [PNG_SIGN] other_chunks = [] seq = 0 png, control = self.frames[0] out.append(png.hdr) out.append(make_chunk("acTL", struct.pack("!II", len(self.frames), self.num_plays))) if control: out.append(make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes())) seq += 1 idat_chunks = [] for type_, data in png.chunks: if type_ in ("IHDR", "IEND"): continue if type_ == "IDAT": idat_chunks.append(data) continue out.append(data) out.extend(idat_chunks) for png, control in self.frames[1:]: out.append( make_chunk("fcTL", struct.pack("!I", seq) + control.to_bytes()) ) seq += 1 for type_, data in png.chunks: if type_ in ("IHDR", "IEND") or type_ in CHUNK_BEFORE_IDAT: continue elif type_ == "IDAT": out.append( make_chunk("fdAT", struct.pack("!I", seq) + data[8:-4]) ) seq += 1 else: other_chunks.append(data) out.extend(other_chunks) out.append(png.end) return b"".join(out)
Convert the entire image to bytes. :rtype: bytes
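A short sketch of assembling frames and writing the result to disk, assuming the surrounding class is pyAPNG's `APNG` container and `append_file` is available:

im = APNG()
im.append_file('frame1.png', delay=100)  # delay in milliseconds
im.append_file('frame2.png', delay=100)
with open('animated.png', 'wb') as f:
    f.write(im.to_bytes())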
7,095
async def set_position(self, position, wait_for_completion=True): command_send = CommandSend( pyvlx=self.pyvlx, wait_for_completion=wait_for_completion, node_id=self.node_id, parameter=position) await command_send.do_api_call() if not command_send.success: raise PyVLXException("Unable to send command") await self.after_update()
Set window to desired position. Parameters: * position: Position object containing the target position. * wait_for_completion: If set, function will return after device has reached target position.
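Called from a coroutine, for example (the `window` object and the `Position` constructor follow pyvlx conventions; the 50% target is illustrative):

import asyncio
from pyvlx import Position

async def open_halfway(window):
    # returns once the device reports the target position was reached
    await window.set_position(Position(position_percent=50))

# asyncio.run(open_halfway(window))  # window: a previously discovered node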
7,096
def generate_ill_conditioned_dot_product(n, c, dps=100):
    assert n >= 6
    n2 = round(n / 2)
    x = numpy.zeros(n)
    y = numpy.zeros(n)

    b = math.log2(c)
    # exponents between 0 and b/2, with the extremes guaranteed to occur
    e = numpy.rint(numpy.random.rand(n2) * b / 2).astype(int)
    e[0] = round(b / 2) + 1
    e[-1] = 0

    # first half of x and y: random mantissas with the chosen exponents
    rx, ry = numpy.random.rand(2, n2)
    x[:n2] = (2 * rx - 1) * 2 ** e
    y[:n2] = (2 * ry - 1) * 2 ** e

    def dot_exact(x, y):
        mp.dps = dps
        return mp.fdot(x.tolist(), y.tolist())

    # second half: choose y[i] so the exact dot product of the prefix
    # stays near zero while the exponents decrease
    e = numpy.rint(numpy.linspace(b / 2, 0, n - n2)).astype(int)
    rx, ry = numpy.random.rand(2, n2)
    for i in range(n2, n):
        x[i] = (2 * rx[i - n2] - 1) * 2 ** e[i - n2]
        y[i] = (
            (2 * ry[i - n2] - 1) * 2 ** e[i - n2]
            - dot_exact(x[: i + 1], y[: i + 1])
        ) / x[i]

    # jointly shuffle the entries of x and y; permuting the 2-row array
    # (x, y) itself would only swap the two vectors
    p = numpy.random.permutation(n)
    x, y = x[p], y[p]

    d = dot_exact(x, y)
    # the achieved condition number
    C = 2 * dot_exact(abs(x), abs(y)) / abs(d)

    return x, y, d, C
n ... length of vector c ... target condition number
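Usage sketch comparing the naive dot product against the exact value:

import numpy

x, y, d_exact, cond = generate_ill_conditioned_dot_product(100, 1.0e20)
print('achieved condition number: %.2e' % cond)
print('naive dot: %.6e' % numpy.dot(x, y))
print('exact dot: %.6e' % float(d_exact))  # naive result loses ~all digits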
7,097
def get_partition_by_name(self, db_name, tbl_name, part_name): self.send_get_partition_by_name(db_name, tbl_name, part_name) return self.recv_get_partition_by_name()
Parameters: - db_name - tbl_name - part_name
7,098
def changed(self, name): if self.first: return self.first.changed(name) else: return False
Returns true if the parameter with the specified name has its value changed by the *first* module procedure in the interface. :arg name: the name of the parameter to check changed status for.
7,099
def env(self, **kw): self._original_env = kw if self._env is None: self._env = dict(os.environ) self._env.update({k: unicode(v) for k, v in kw.iteritems()}) return self
Allows adding/overriding env vars in the execution context. :param kw: Key-value pairs :return: self
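A usage sketch (Python 2, matching the code above). The owning class is not shown, so `ExecutionContext` is a hypothetical stand-in for any object exposing this method:

import os

ctx = ExecutionContext()        # hypothetical owner of env()
ctx.env(DEBUG=1, LANG='C')      # values are coerced to unicode
assert ctx._env['DEBUG'] == u'1'
assert ctx._env['PATH'] == os.environ['PATH']  # inherited from os.environ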