Dataset schema (three columns):
  Unnamed: 0 : int64 row index, values 0 to ~389k
  code : string, lengths 26 to 79.6k characters
  docstring : string, lengths 1 to 46.9k characters
383,200
def hull(self):
    from scipy.spatial import ConvexHull
    if len(self.coordinates) >= 4:
        inds = ConvexHull(self.coordinates).vertices
        return self.coordinates[inds]
    else:
        return self.coordinates
Bounding polygon as a convex hull.
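For context, a standalone sketch of the same ConvexHull pattern; the coordinates below are made up for illustration:

    import numpy as np
    from scipy.spatial import ConvexHull

    # .vertices gives the indices of the hull's corner points; the method
    # above guards with len(coordinates) >= 4 before attempting the hull.
    coordinates = np.array([[0.0, 0.0], [2.0, 0.0], [2.0, 2.0],
                            [0.0, 2.0], [1.0, 1.0]])
    inds = ConvexHull(coordinates).vertices
    print(coordinates[inds])  # the interior point [1.0, 1.0] is dropped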
383,201
def lagcrp_helper(egg, match='exact', distance='euclidean', ts=None,
                  features=None):
    # string literals were lost in extraction; 'exact'/'best'/'smooth' are
    # grounded in the docstring, the remaining literals are reconstructed
    def lagcrp(rec, lstlen):
        def check_pair(a, b):
            if (a > 0 and b > 0) and (a != b):
                return True
            else:
                return False

        def compute_actual(rec, lstlen):
            arr = pd.Series(data=np.zeros((lstlen) * 2),
                            index=list(range(-lstlen, 0)) + list(range(1, lstlen + 1)))
            recalled = []
            for trial in range(0, len(rec) - 1):
                a = rec[trial]
                b = rec[trial + 1]
                if check_pair(a, b) and (a not in recalled) and (b not in recalled):
                    arr[b - a] += 1
                recalled.append(a)
            return arr

        def compute_possible(rec, lstlen):
            arr = pd.Series(data=np.zeros((lstlen) * 2),
                            index=list(range(-lstlen, 0)) + list(range(1, lstlen + 1)))
            recalled = []
            for trial in rec:
                if np.isnan(trial):
                    pass
                else:
                    lbound = int(1 - trial)
                    ubound = int(lstlen - trial)
                    chances = list(range(lbound, 0)) + list(range(1, ubound + 1))
                    for each in recalled:
                        if each - trial in chances:
                            chances.remove(each - trial)
                    arr[chances] += 1
                    recalled.append(trial)
            return arr

        actual = compute_actual(rec, lstlen)
        possible = compute_possible(rec, lstlen)
        crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)]
        crp.insert(int(len(crp) / 2), np.nan)
        return crp

    def nlagcrp(distmat, ts=None):
        def lagcrp_model(s):
            idx = list(range(0, -s, -1))
            return np.array([list(range(i, i + s)) for i in idx])

        distmat = distmat[:, ~np.all(np.isnan(distmat), axis=0)].T
        model = lagcrp_model(distmat.shape[1])
        lagcrp = np.zeros(ts * 2)
        for rdx in range(len(distmat) - 1):
            item = distmat[rdx, :]
            next_item = distmat[rdx + 1, :]
            if not np.isnan(item).any() and not np.isnan(next_item).any():
                outer = np.outer(item, next_item)
                lagcrp += np.array(list(map(lambda lag: np.mean(outer[model == lag]),
                                            range(-ts, ts))))
        lagcrp /= ts
        lagcrp = list(lagcrp)
        lagcrp.insert(int(len(lagcrp) / 2), np.nan)
        return np.array(lagcrp)

    def _format(p, r):
        p = np.matrix([np.array(i) for i in p])
        if p.shape[0] == 1:
            p = p.T
        r = map(lambda x: [np.nan] * p.shape[1] if check_nan(x) else x, r)
        r = np.matrix([np.array(i) for i in r])
        if r.shape[0] == 1:
            r = r.T
        return p, r

    opts = dict(match=match, distance=distance, features=features)
    if match == 'exact':
        opts.update({'features': 'item'})  # key/value reconstructed
    recmat = recall_matrix(egg, **opts)
    if not ts:
        ts = egg.pres.shape[1]
    if match in ['exact', 'best']:
        lagcrp = [lagcrp(lst, egg.list_length) for lst in recmat]
    elif match == 'smooth':
        lagcrp = np.atleast_2d(np.mean([nlagcrp(r, ts=ts) for r in recmat], 0))
    else:
        raise ValueError('Match must be set to exact, best or smooth.')
    return np.nanmean(lagcrp, axis=0)
Computes probabilities for each transition distance (the probability that a recalled word will be a given distance--in presentation order--from the previously recalled word).

Parameters
----------
egg : quail.Egg
    Data to analyze
match : str (exact, best or smooth)
    Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item.
distance : str
    The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by scipy.spatial.distance.cdist.

Returns
-------
prec : numpy array
    Each float is the probability of that transition distance (distances indexed by position, from -(n-1) to (n-1), excluding zero).
383,202
def import_teamocil(sconf):
    # dict key literals were lost in extraction; the names below are
    # reconstructed from the teamocil config format described in the docstring
    tmuxp_config = {}
    if 'session' in sconf:
        sconf = sconf['session']
    if 'name' in sconf:
        tmuxp_config['session_name'] = sconf['name']
    else:
        tmuxp_config['session_name'] = None
    if 'root' in sconf:
        tmuxp_config['start_directory'] = sconf.pop('root')
    tmuxp_config['windows'] = []
    for w in sconf['windows']:
        windowdict = {'window_name': w['name']}
        if 'clear' in w:
            windowdict['clear'] = w['clear']
        if 'filters' in w:
            if 'before' in w['filters']:
                for b in w['filters']['before']:
                    windowdict['shell_command_before'] = w['filters']['before']
            if 'after' in w['filters']:
                for b in w['filters']['after']:
                    windowdict['shell_command_after'] = w['filters']['after']
        if 'root' in w:
            windowdict['start_directory'] = w.pop('root')
        if 'splits' in w:
            w['panes'] = w.pop('splits')
        if 'panes' in w:
            for p in w['panes']:
                if 'cmd' in p:
                    p['shell_command'] = p.pop('cmd')
                if 'width' in p:
                    # todo: width in pane -> main-pane-width
                    p.pop('width')
            windowdict['panes'] = w['panes']
        if 'layout' in w:
            windowdict['layout'] = w['layout']
        tmuxp_config['windows'].append(windowdict)
    return tmuxp_config
Return tmuxp config from a `teamocil`_ yaml config.

.. _teamocil: https://github.com/remiprev/teamocil

Parameters
----------
sconf : dict
    python dict for session configuration

Notes
-----
Todos:

- change 'root' to a cd or start_directory
- width in pane -> main-pane-width
- with_env_var
- clear
- cmd_separator
383,203
def _swap_optimizer_allows(self, p1, p2):
    a = self._array
    tile1 = a[p1]
    tile2 = a[p2]
    if tile1 == tile2:
        return False
    if tile1.matches(tile2) and not any(t.is_wildcard() for t in (tile1, tile2)):
        return False
    center_other_pairs = ((p1, p2), (p2, p1))

    class MatchedTiles(Exception):
        pass

    try:
        for center_p, other_p in center_other_pairs:
            up_down_left_right = ((center_p[0] - 1, center_p[1]),
                                  (center_p[0] + 1, center_p[1]),
                                  (center_p[0], center_p[1] - 1),
                                  (center_p[0], center_p[1] + 1))
            post_swap_center_tile = a[other_p]
            for surrounding_p in up_down_left_right:
                if any((not (0 <= surrounding_p[0] <= 7),
                        not (0 <= surrounding_p[1] <= 7),
                        surrounding_p == other_p)):
                    continue
                surrounding_tile = a[surrounding_p]
                if post_swap_center_tile.matches(surrounding_tile):
                    raise MatchedTiles()
    except MatchedTiles:
        pass
    else:
        return False
    return True
Identify easily discarded meaningless swaps. This is motivated by the cost of millions of swaps being simulated.
383,204
def order_by(self, key):
    if key is None:
        raise NullArgumentError(u"No key for sorting given")
    kf = [OrderingDirection(key, reverse=False)]
    return SortedEnumerable(key_funcs=kf, data=self._data)
Returns new Enumerable sorted in ascending order by given key :param key: key to sort by as lambda expression :return: new Enumerable object
383,205
import functools
import sys
import traceback


def thread_safe(method):
    @functools.wraps(method)
    def _locker(self, *args, **kwargs):
        # the message strings were lost in extraction and are reconstructed
        assert hasattr(self, 'lock'), \
            '{0}.{1} requires the instance to have a self.lock attribute'.format(
                self.__class__.__name__, method.__name__)
        try:
            self.lock.acquire()
            return method(self, *args, **kwargs)
        finally:
            try:
                self.lock.release()
            except Exception:
                sys.stderr.write('Failed to release lock in {0}\n'.format(
                    method.__name__))
                traceback.print_exc(file=sys.stderr)
    return _locker
Wraps a method with a lock acquire/release cycle. The decorator requires the class instance to have a field ``self.lock`` of type threading.Lock or threading.RLock.
383,206
def save_task(task, broker):
    # The opening of this function (the lookup that binds ``existing_task``)
    # was lost in extraction; the filter below is a reconstructed sketch so
    # the surviving branches are reachable. Task-dict keys are grounded by
    # the keyword names in the create() call.
    try:
        existing_task = Task.objects.filter(id=task['id']).first()
        if existing_task:
            # only update the result if it hasn't succeeded yet
            if not existing_task.success:
                existing_task.stopped = task['stopped']
                existing_task.result = task['result']
                existing_task.success = task['success']
                existing_task.save()
        else:
            Task.objects.create(id=task['id'],
                                name=task['name'],
                                func=task['func'],
                                hook=task.get('hook'),
                                args=task['args'],
                                kwargs=task['kwargs'],
                                started=task['started'],
                                stopped=task['stopped'],
                                result=task['result'],
                                group=task.get('group'),
                                success=task['success'])
    except Exception as e:
        logger.error(e)
Saves the task package to Django or the cache
383,207
def save(self, **kwargs):
    # ``fs`` (a FileSystemStorage), ``now``, ``join``, ``uuid4`` and
    # ``FieldEntry`` are module-level bindings in the original source
    entry = super(FormForForm, self).save(commit=False)
    entry.form = self.form
    entry.entry_time = now()
    entry.save()
    entry_fields = entry.fields.values_list("field_id", flat=True)
    new_entry_fields = []
    for field in self.form_fields:
        field_key = "field_%s" % field.id
        value = self.cleaned_data[field_key]
        if value and self.fields[field_key].widget.needs_multipart_form:
            value = fs.save(join("forms", str(uuid4()), value.name), value)
        if isinstance(value, list):
            value = ", ".join([v.strip() for v in value])
        if field.id in entry_fields:
            field_entry = entry.fields.get(field_id=field.id)
            field_entry.value = value
            field_entry.save()
        else:
            new = {"entry": entry, "field_id": field.id, "value": value}
            new_entry_fields.append(FieldEntry(**new))
    if new_entry_fields:
        FieldEntry.objects.bulk_create(new_entry_fields)
    return entry
Create a ``FormEntry`` instance and related ``FieldEntry`` instances for each form field.
383,208
def add_subtract(st, max_iter=7, max_npart='calc', max_mem=2e8,
                 always_check_remove=False, **kwargs):
    # 'calc' default is grounded in the docstring; log messages and the
    # 'psf' group name are reconstructed
    if max_npart == 'calc':
        max_npart = 0.05 * st.obj_get_positions().shape[0]

    total_changed = 0
    _change_since_opt = 0
    removed_poses = []
    added_poses0 = []
    added_poses = []

    nr = 1  # check removal on the first pass
    for _ in range(max_iter):
        if (nr != 0) or (always_check_remove):
            nr, rposes = remove_bad_particles(st, **kwargs)
        na, aposes = add_missing_particles(st, **kwargs)
        current_changed = na + nr
        removed_poses.extend(rposes)
        added_poses0.extend(aposes)
        total_changed += current_changed
        _change_since_opt += current_changed
        if current_changed == 0:
            break
        elif _change_since_opt > max_npart:
            _change_since_opt *= 0
            CLOG.info('Too many changes; optimizing non-psf globals')
            opt.do_levmarq(st, opt.name_globals(st, remove_params=st.get(
                'psf').params), max_iter=1, run_length=4, num_eig_dirs=3,
                max_mem=max_mem, eig_update_frequency=2, rz_order=0,
                use_accel=True)
            CLOG.info('After optimization, error is {}'.format(st.error))

    # fit the positions of the added particles
    for p in added_poses0:
        i = st.obj_closest_particle(p)
        opt.do_levmarq_particles(st, np.array([i]), max_iter=2, damping=0.3)
        added_poses.append(st.obj_get_positions()[i])
    return total_changed, np.array(removed_poses), np.array(added_poses)
Automatically adds and subtracts missing & extra particles. Operates by removing bad particles then adding missing particles on repeat, until either no particles are added/removed or after `max_iter` attempts. Parameters ---------- st: :class:`peri.states.State` The state to add and subtract particles to. max_iter : Int, optional The maximum number of add-subtract loops to use. Default is 7. Terminates after either max_iter loops or when nothing has changed. max_npart : Int or 'calc', optional The maximum number of particles to add before optimizing the non-psf globals. Default is ``'calc'``, which uses 5% of the initial number of particles. max_mem : Int, optional The maximum memory to use for optimization after adding max_npart particles. Default is 2e8. always_check_remove : Bool, optional Set to True to always check whether to remove particles. If ``False``, only checks for removal while particles were removed on the previous attempt. Default is False. Other Parameters ---------------- invert : Bool, optional ``True`` if the particles are dark on a bright background, ``False`` if they are bright on a dark background. Default is ``True``. min_rad : Float, optional Particles with radius below ``min_rad`` are automatically deleted. Default is ``'calc'`` = median rad - 25* radius std. max_rad : Float, optional Particles with radius above ``max_rad`` are automatically deleted. Default is ``'calc'`` = median rad + 15* radius std, but you should change this for your particle sizes. min_edge_dist : Float, optional Particles closer to the edge of the padded image than this are automatically deleted. Default is 2.0. check_rad_cutoff : 2-element float list. Particles with ``radii < check_rad_cutoff[0]`` or ``> check...[1]`` are checked if they should be deleted (not automatic). Default is ``[3.5, 15]``. check_outside_im : Bool, optional Set to True to check whether to delete particles whose positions are outside the un-padded image. rad : Float, optional The initial radius for added particles; added particles radii are not fit until the end of ``add_subtract``. Default is ``'calc'``, which uses the median radii of active particles. tries : Int, optional The number of particles to attempt to remove or add, per iteration. Default is 50. im_change_frac : Float, optional How good the change in error needs to be relative to the change in the difference image. Default is 0.2; i.e. if the error does not decrease by 20% of the change in the difference image, do not add the particle. min_derr : Float, optional The minimum change in the state's error to keep a particle in the image. Default is ``'3sig'`` which uses ``3*st.sigma``. do_opt : Bool, optional Set to False to avoid optimizing particle positions after adding. minmass : Float, optional The minimum mass for a particle to be identified as a feature, as used by trackpy. Defaults to a decent guess. use_tp : Bool, optional Set to True to use trackpy to find missing particles inside the image. Not recommended since trackpy deliberately cuts out particles at the edge of the image. Default is ``False``. Returns ------- total_changed : Int The total number of adds and subtracts done on the data. Not the same as ``changed_inds.size`` since the same particle or particle index can be added/subtracted multiple times. added_positions : [N_added,3] numpy.ndarray The positions of particles that have been added at any point in the add-subtract cycle. 
removed_positions : [N_removed,3] numpy.ndarray The positions of particles that have been removed at any point in the add-subtract cycle. Notes ----- Occasionally after the initial featuring a cluster of particles is featured as 1 big particle. To fix these mistakes, it helps to set max_rad to a physical value. This removes the big particle and allows it to be re-featured by (several passes of) the adds. The added/removed positions returned are whether or not the position has been added or removed ever. It's possible that a position is added, then removed during a later iteration.
383,209
def transform_y(self, tfms:TfmList=None, **kwargs):
    "Set `tfms` to be applied to the targets only."
    _check_kwargs(self.y, tfms, **kwargs)
    self.tfm_y = True
    if tfms is None:
        self.tfms_y = list(filter(lambda t: t.use_on_y, listify(self.tfms)))
        self.tfmargs_y = {**self.tfmargs, **kwargs}
    else:
        tfms = list(filter(lambda t: t.use_on_y, tfms))
        self.tfms_y, self.tfmargs_y = tfms, kwargs
    return self
Set `tfms` to be applied to the targets only.
383,210
def write(self, str):
    if self.closed:
        raise ValueError()
    if self._mode in _allowed_read:
        raise Exception()
    if self._valid is not None:
        raise Exception()
    if not self._done_header:
        self._write_header()
    encrypted = self._crypto.encrypt(str)
    self._checksumer.update(encrypted)
    self._fp.write(encrypted)
Write string str to the underlying file. Note that due to buffering, flush() or close() may be needed before the file on disk reflects the data written.
383,211
def exists(self):
    if self._create:
        return False
    try:
        self.getProfile()
        return True
    except RequestFailed:
        return False
:type: bool True when the object actually exists (and can be accessed by the current user) in Fedora
383,212
def fetch(self):
    params = values.of({})
    payload = self._version.fetch(
        'GET',
        self._uri,
        params=params,
    )
    return ChallengeInstance(
        self._version,
        payload,
        # solution keys mirror the keyword names; literals reconstructed
        service_sid=self._solution['service_sid'],
        identity=self._solution['identity'],
        factor_sid=self._solution['factor_sid'],
        sid=self._solution['sid'],
    )
Fetch a ChallengeInstance :returns: Fetched ChallengeInstance :rtype: twilio.rest.authy.v1.service.entity.factor.challenge.ChallengeInstance
383,213
def add_row(self, label, row_data, columns=""):
    if len(columns):
        if sorted(self.df.columns) == sorted(columns):
            self.df.columns = columns
        else:
            new_columns = []
            new_columns.extend(columns)
            for col in self.df.columns:
                if col not in new_columns:
                    new_columns.append(col)
    if sorted(row_data.keys()) != sorted(self.df.columns):
        for key in row_data:
            if key not in self.df.columns:
                self.df[key] = None
        for col_label in self.df.columns:
            if col_label not in list(row_data.keys()):
                row_data[col_label] = None
    self.df.index = self.df.index.astype(str)
    label = str(label)
    self.df.loc[label + "new"] = pd.Series(row_data)
    self.df.rename(index={label + "new": label}, inplace=True)
    return self.df
Add a row with data. If any new keys are present in the row_data dictionary, that column will be added to the dataframe. This is done in place.
383,214
def set_cell(self, index, value):
    if self._sort:
        exists, i = sorted_exists(self._index, index)
        if not exists:
            self._insert_row(i, index)
    else:
        try:
            i = self._index.index(index)
        except ValueError:
            i = len(self._index)
            self._add_row(index)
    self._data[i] = value
Sets the value of a single cell. If the index is not in the current index then a new index will be created. :param index: index value :param value: value to set :return: nothing
383,215
def returner(ret):
    # signal/sender literals were lost in extraction; 'ret' is grounded by
    # the keyword argument below, the sender string is a placeholder
    signaled = dispatch.Signal(providing_args=['ret']).send(
        sender='returner', ret=ret)
    for signal in signaled:
        log.debug(
            'Django returner: signal receiver %s returned %s',
            signal[0], signal[1]
        )
Signal a Django server that a return is available
383,216
def norm_package_version(version):
    if version:
        # separator literals were lost in extraction; ',' is a reconstruction
        version = ','.join(v.strip() for v in version.split(',')).strip()
        if version.startswith('(') and version.endswith(')'):
            version = version[1:-1]
        version = ''.join(v for v in version if v.strip())
    else:
        version = ''
    return version
Normalize a version by removing extra spaces and parentheses.
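A quick behavioural check of the function above, assuming the reconstructed ',' separator; the inputs are hypothetical:

    print(norm_package_version('(1.2.3)'))      # '1.2.3' - parentheses stripped
    print(norm_package_version(' 1.0 , 2.0 '))  # '1.0,2.0' - spaces removed
    print(norm_package_version(None))           # ''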
383,217
def dict_array_bytes(ary, template):
    # key literals reconstructed; they mirror the helper names
    shape = shape_from_str_tuple(ary['shape'], template)
    dtype = dtype_from_str(ary['dtype'], template)
    return array_bytes(shape, dtype)
Return the number of bytes required by an array

Arguments
---------
ary : dict
    Dictionary representation of an array
template : dict
    A dictionary of key-values, used to replace any string values in the array with concrete integral values

Returns
-------
The number of bytes required to represent the array.
383,218
def check(a, b):
    aencrypt = encrypt(a)
    bencrypt = encrypt(b)
    return a == b or a == bencrypt or aencrypt == b
Checks to see if the two values are equal to each other. :param a | <str> b | <str> :return <bool>
383,219
def from_shapely(polygon_shapely, label=None):
    import shapely.geometry
    ia.do_assert(isinstance(polygon_shapely, shapely.geometry.Polygon))
    # load shapely polygon and convert to imgaug polygon
    if polygon_shapely.exterior is None or len(polygon_shapely.exterior.coords) == 0:
        return Polygon([], label=label)
    exterior = np.float32([[x, y] for (x, y) in polygon_shapely.exterior.coords])
    return Polygon(exterior, label=label)
Create a polygon from a Shapely polygon. Note: This will remove any holes in the Shapely polygon. Parameters ---------- polygon_shapely : shapely.geometry.Polygon The shapely polygon. label : None or str, optional The label of the new polygon. Returns ------- imgaug.Polygon A polygon with the same exterior as the Shapely polygon.
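A minimal sketch of the conversion step in isolation, with a hypothetical square polygon:

    import numpy as np
    import shapely.geometry

    poly = shapely.geometry.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
    # exterior ring coordinates become float32 pairs; holes are ignored,
    # as the docstring notes
    exterior = np.float32([[x, y] for (x, y) in poly.exterior.coords])
    print(exterior)  # Shapely closes the ring, so the first point repeats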
383,220
def delete(self, *keys):
    # command literal reconstructed from the docstring (Redis DEL)
    return self._execute([b'DEL'] + list(keys), len(keys))
Removes the specified keys. A key is ignored if it does not exist. Returns :data:`True` if all keys are removed. .. note:: **Time complexity**: ``O(N)`` where ``N`` is the number of keys that will be removed. When a key to remove holds a value other than a string, the individual complexity for this key is ``O(M)`` where ``M`` is the number of elements in the list, set, sorted set or hash. Removing a single key that holds a string value is ``O(1)``. :param keys: One or more keys to remove :type keys: :class:`str`, :class:`bytes` :rtype: bool :raises: :exc:`~tredis.exceptions.RedisError`
383,221
def _handle_ping(client, topic, dct):
    # the key/value literals were lost in extraction; the names below are
    # reconstructed placeholders for a ping/pong exchange
    if dct['action'] == 'ping':
        resp = {
            'action': 'pong',
            'name': client.name,
            'data': dct,
        }
        client.publish('pong', resp)  # reply topic reconstructed
Internal method that will be called when receiving ping message.
383,222
def __system_multiCall(calls, **kwargs):
    if not isinstance(calls, list):
        raise RPCInvalidParams(
            'system.multicall first argument should be a list, {} given.'.format(
                type(calls)))
    handler = kwargs.get(HANDLER_KEY)
    results = []
    for call in calls:
        try:
            # 'methodName'/'params' are grounded in the docstring; the fault
            # keys follow the XML-RPC multicall convention
            result = handler.execute_procedure(call['methodName'],
                                               args=call.get('params'))
            results.append([result])
        except RPCException as e:
            results.append({
                'faultCode': e.code,
                'faultString': e.message,
            })
        except Exception as e:
            results.append({
                'faultCode': RPC_INTERNAL_ERROR,
                'faultString': str(e),
            })
    return results
Call multiple RPC methods at once. :param calls: An array of struct like {"methodName": string, "params": array } :param kwargs: Internal data :type calls: list :type kwargs: dict :return:
383,223
def serialize(self, obj, method='json', beautify=False, raise_exception=False):
    # the default for ``method`` was lost in extraction; 'json' is an assumption
    return self.helper.string.serialization.serialize(
        obj=obj, method=method, beautify=beautify,
        raise_exception=raise_exception)
Alias of helper.string.serialization.serialize
383,224
def cidr_to_ipv4_netmask(cidr_bits):
    try:
        cidr_bits = int(cidr_bits)
        if not 1 <= cidr_bits <= 32:
            return ''
    except ValueError:
        return ''
    netmask = ''
    for idx in range(4):
        if idx:
            netmask += '.'
        if cidr_bits >= 8:
            netmask += '255'
            cidr_bits -= 8
        else:
            netmask += '{0}'.format(256 - (2 ** (8 - cidr_bits)))
            cidr_bits = 0
    return netmask
Returns an IPv4 netmask
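Spot checks of the octet arithmetic (each full 8 bits contributes '255'; a partial octet of k bits contributes 256 - 2**(8 - k)), assuming the reconstructed literals above:

    print(cidr_to_ipv4_netmask(24))    # 255.255.255.0
    print(cidr_to_ipv4_netmask(26))    # 255.255.255.192, since 256 - 2**(8-2) = 192
    print(cidr_to_ipv4_netmask('33'))  # '' (out of the 1..32 range)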
383,225
def _parse_rd(self, config):
    match = RD_RE.search(config)
    if match:
        value = match.group('value')  # group name lost in extraction; placeholder
    else:
        value = match
    return dict(rd=value)
_parse_rd scans the provided configuration block and extracts the vrf rd. The return dict is intended to be merged into the response dict. Args: config (str): The vrf configuration block from the nodes running configuration Returns: dict: resource dict attribute
383,226
def SkyCoord(self, *args, **kwargs):
    # stripped literals ('quantity', 'SkyCoord', 'icrs') reconstructed from
    # galpy's conventions
    kwargs.pop('quantity', None)
    _check_roSet(self, kwargs, 'SkyCoord')
    radec = self._radec(*args, **kwargs)
    tdist = self.dist(quantity=False, *args, **kwargs)
    if not _APY3:
        return coordinates.SkyCoord(radec[:, 0] * units.degree,
                                    radec[:, 1] * units.degree,
                                    distance=tdist * units.kpc,
                                    frame='icrs')
    pmrapmdec = self._pmrapmdec(*args, **kwargs)
    vlos = self._lbdvrpmllpmbb(*args, **kwargs)[:, 3]
    v_sun = coordinates.CartesianDifferential(
        nu.array([-self._solarmotion[0],
                  self._solarmotion[1] + self._vo,
                  self._solarmotion[2]]) * units.km / units.s)
    return coordinates.SkyCoord(radec[:, 0] * units.degree,
                                radec[:, 1] * units.degree,
                                distance=tdist * units.kpc,
                                pm_ra_cosdec=pmrapmdec[:, 0] * units.mas / units.yr,
                                pm_dec=pmrapmdec[:, 1] * units.mas / units.yr,
                                radial_velocity=vlos * units.km / units.s,
                                frame='icrs',
                                galcen_distance=nu.sqrt(self._ro ** 2.
                                                        + self._zo ** 2.) * units.kpc,
                                z_sun=self._zo * units.kpc,
                                galcen_v_sun=v_sun)
NAME:
   SkyCoord
PURPOSE:
   return the position as an astropy SkyCoord
INPUT:
   t - (optional) time at which to get the position
   obs=[X,Y,Z] - (optional) position of observer (in kpc) (default=Object-wide default) OR Orbit object that corresponds to the orbit of the observer; Y is ignored and always assumed to be zero
   ro= distance in kpc corresponding to R=1. (default=Object-wide default)
OUTPUT:
   SkyCoord(t)
HISTORY:
   2015-06-02 - Written - Bovy (IAS)
383,227
def d3logpdf_df3(self, f, y, Y_metadata=None):
    if isinstance(self.gp_link, link_functions.Identity):
        d3logpdf_df3 = self.d3logpdf_dlink3(f, y, Y_metadata=Y_metadata)
    else:
        inv_link_f = self.gp_link.transf(f)
        d3logpdf_dlink3 = self.d3logpdf_dlink3(inv_link_f, y, Y_metadata=Y_metadata)
        dlink_df = self.gp_link.dtransf_df(f)
        d2logpdf_dlink2 = self.d2logpdf_dlink2(inv_link_f, y, Y_metadata=Y_metadata)
        d2link_df2 = self.gp_link.d2transf_df2(f)
        dlogpdf_dlink = self.dlogpdf_dlink(inv_link_f, y, Y_metadata=Y_metadata)
        d3link_df3 = self.gp_link.d3transf_df3(f)
        d3logpdf_df3 = chain_3(d3logpdf_dlink3, dlink_df, d2logpdf_dlink2,
                               d2link_df2, dlogpdf_dlink, d3link_df3)
    return d3logpdf_df3
Evaluates the link function link(f), then computes the third derivative of the log likelihood using it. Uses Faa di Bruno's formula for the chain rule

.. math::
    \\frac{d^{3}\\log p(y|\\lambda(f))}{df^{3}} = \\frac{d^{3}\\log p(y|\\lambda(f))}{d\\lambda(f)^{3}}\\left(\\frac{d\\lambda(f)}{df}\\right)^{3} + 3\\frac{d^{2}\\log p(y|\\lambda(f))}{d\\lambda(f)^{2}}\\frac{d\\lambda(f)}{df}\\frac{d^{2}\\lambda(f)}{df^{2}} + \\frac{d\\log p(y|\\lambda(f))}{d\\lambda(f)}\\frac{d^{3}\\lambda(f)}{df^{3}}

:param f: latent variables f
:type f: Nx1 array
:param y: data
:type y: Nx1 array
:param Y_metadata: Y_metadata which is not used in student t distribution - not used
:returns: third derivative of log likelihood evaluated for this point
:rtype: float
383,228
def canonical_url(configs, endpoint_type=PUBLIC):
    scheme = _get_scheme(configs)
    address = resolve_address(endpoint_type)
    if is_ipv6(address):
        address = "[{}]".format(address)
    return '%s://%s' % (scheme, address)
Returns the correct HTTP URL to this host given the state of HTTPS configuration, hacluster and charm configuration. :param configs: OSTemplateRenderer config templating object to inspect for a complete https context. :param endpoint_type: str endpoint type to resolve. :param returns: str base URL for services on the current service unit.
383,229
def setVersion(self, date_issued, version_id=None):
    if date_issued is not None:
        self.set_date_issued(date_issued)
    elif version_id is not None:
        self.set_version_by_num(version_id)
    else:
        LOG.error("date or version not set!")
        return
    if version_id is not None:
        self.set_version_by_num(version_id)
    else:
        LOG.info("set version to %s", self.version)
        self.set_version_by_date(date_issued)
    LOG.info("set version to %s", self.version)
    return
Legacy function... should use the other set_* for version and date.

As of 2016-10-20 used in:

- dipper/sources/HPOAnnotations.py 139
- dipper/sources/CTD.py 99
- dipper/sources/BioGrid.py 100
- dipper/sources/MGI.py 255
- dipper/sources/EOM.py 93
- dipper/sources/Coriell.py 200
- dipper/sources/MMRRC.py 77

# TODO set as deprecated

:param date_issued:
:param version_id:
:return:
383,230
def _dict_rpartition(in_dict, keys, delimiter=DEFAULT_TARGET_DELIM,
                     ordered_dict=False):
    if delimiter in keys:
        all_but_last_keys, _, last_key = keys.rpartition(delimiter)
        ensure_dict_key(in_dict, all_but_last_keys, delimiter=delimiter,
                        ordered_dict=ordered_dict)
        dict_pointer = salt.utils.data.traverse_dict(in_dict,
                                                     all_but_last_keys,
                                                     default=None,
                                                     delimiter=delimiter)
    else:
        dict_pointer = in_dict
        last_key = keys
    return dict_pointer, last_key
Helper function to: - Ensure all but the last key in `keys` exist recursively in `in_dict`. - Return the dict at the one-to-last key, and the last key :param dict in_dict: The dict to work with. :param str keys: The delimited string with one or more keys. :param str delimiter: The delimiter to use in `keys`. Defaults to ':'. :param bool ordered_dict: Create OrderedDicts if keys are missing. Default: create regular dicts. :return tuple(dict, str)
383,231
def plane_intersection(strike1, dip1, strike2, dip2):
    norm1 = sph2cart(*pole(strike1, dip1))
    norm2 = sph2cart(*pole(strike2, dip2))
    norm1, norm2 = np.array(norm1), np.array(norm2)
    lon, lat = cart2sph(*np.cross(norm1, norm2, axis=0))
    return geographic2plunge_bearing(lon, lat)
Finds the intersection of two planes. Returns a plunge/bearing of the linear intersection of the two planes. Also accepts sequences of strike1s, dip1s, strike2s, dip2s. Parameters ---------- strike1, dip1 : numbers or sequences of numbers The strike and dip (in degrees, following the right-hand-rule) of the first plane(s). strike2, dip2 : numbers or sequences of numbers The strike and dip (in degrees, following the right-hand-rule) of the second plane(s). Returns ------- plunge, bearing : arrays The plunge and bearing(s) (in degrees) of the line representing the intersection of the two planes.
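A usage sketch, assuming the function is exposed at the package top level (it comes from mplstereonet); the strike/dip values are illustrative:

    import mplstereonet

    # Two planes given as strike/dip (right-hand rule); the result is the
    # plunge and bearing of their line of intersection.
    plunge, bearing = mplstereonet.plane_intersection(315, 30, 120, 40)
    print(plunge, bearing)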
383,232
def _openResources(self):
    logger.info("Opening: {}".format(self._fileName))
    self._ncGroup = Dataset(self._fileName)
Opens the root Dataset.
383,233
def uuid_from_time(time_arg, node=None, clock_seq=None):
    if hasattr(time_arg, 'utctimetuple'):
        seconds = int(calendar.timegm(time_arg.utctimetuple()))
        microseconds = (seconds * 1e6) + time_arg.time().microsecond
    else:
        microseconds = int(time_arg * 1e6)

    # 0x01b21dd213814000 is the number of 100-ns intervals between the UUID
    # epoch (1582-10-15) and the Unix epoch (1970-01-01)
    intervals = int(microseconds * 10) + 0x01b21dd213814000

    time_low = intervals & 0xffffffff
    time_mid = (intervals >> 32) & 0xffff
    time_hi_version = (intervals >> 48) & 0x0fff
    if clock_seq is None:
        clock_seq = random.getrandbits(14)
    else:
        if clock_seq > 0x3fff:
            raise ValueError('clock_seq is out of range (14 bits maximum)')
    clock_seq_low = clock_seq & 0xff
    clock_seq_hi_variant = 0x80 | ((clock_seq >> 8) & 0x3f)
    if node is None:
        node = random.getrandbits(48)
    return uuid.UUID(fields=(time_low, time_mid, time_hi_version,
                             clock_seq_hi_variant, clock_seq_low, node),
                     version=1)
Converts a datetime or timestamp to a type 1 :class:`uuid.UUID`.

:param time_arg: The time to use for the timestamp portion of the UUID. This can either be a :class:`datetime` object or a timestamp in seconds (as returned from :meth:`time.time()`).
:type time_arg: :class:`datetime` or timestamp
:param node: Node id integer for the UUID (up to 48 bits). If not specified, this field is randomized.
:type node: long
:param clock_seq: Clock sequence field for the UUID (up to 14 bits). If not specified, a random sequence is generated.
:type clock_seq: int
:rtype: :class:`uuid.UUID`
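Both accepted input forms from the docstring, exercised against the function above (the node and clock_seq values are arbitrary):

    import datetime
    import time

    u1 = uuid_from_time(datetime.datetime(2020, 1, 1, 12, 0, 0))
    u2 = uuid_from_time(time.time(), node=0x123456789abc, clock_seq=0x1234)
    print(u1.version, u2.version)  # both are version-1 UUIDs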
383,234
def Or(*xs, simplify=True):
    xs = [Expression.box(x).node for x in xs]
    y = exprnode.or_(*xs)
    if simplify:
        y = y.simplify()
    return _expr(y)
Expression disjunction (sum, OR) operator. If *simplify* is ``True``, return a simplified expression.
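A usage sketch, assuming pyeda's conventional import path for exprvar and Or:

    from pyeda.inter import exprvar, Or

    a, b = exprvar('a'), exprvar('b')
    # with the default simplify=True, identities such as x + 0 = x are
    # applied on construction, so the constant 0 drops out
    print(Or(a, b, 0))
    print(Or(a, b, 0, simplify=False))  # the 0 is kept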
383,235
def delete(self, path):
    path = normalize_api_path(path)
    if path in self.managers:
        raise HTTPError(
            400, "Can't delete root of %s" % self.managers[path]
        )
    return self.__delete(path)
Ensure that roots of our managers can't be deleted. This should be enforced by https://github.com/ipython/ipython/pull/8168, but rogue implementations might override this behavior.
383,236
def _sign_block(self, block):
    block_header = block.block_header
    header_bytes = block_header.SerializeToString()
    signature = self._identity_signer.sign(header_bytes)
    block.set_signature(signature)
    return block
The block should be complete and the final signature from the publishing validator (this validator) needs to be added.
383,237
def get_contents_static(self, block_alias, context):
    # the body of this function was heavily damaged in extraction; the view
    # resolution below is a reconstructed sketch, and the names
    # ``resolved_view_name``, ``lookup_area`` and ``current_url`` are bound
    # as they were used in the surviving branches
    if 'request' not in context:
        # without a request the current view cannot be resolved
        return ''
    current_url = context['request'].path
    resolved_view_name = resolve(current_url).url_name
    lookup_area = self.lookup[block_alias]
    static_block_contents = ''
    if resolved_view_name in lookup_area:
        static_block_contents = choice(lookup_area[resolved_view_name])
    else:
        for url, contents in lookup_area.items():
            if url.match(current_url):
                static_block_contents = choice(contents)
                break
    return static_block_contents
Returns contents of a static block.
383,238
def messages(self):
    if self._session:
        result = []
        for msg in self._session.messages:
            ex = _create_exception_by_message(msg)
            result.append((type(ex), ex))
        return result
    else:
        return None
Messages generated by server, see http://legacy.python.org/dev/peps/pep-0249/#cursor-messages
383,239
def lowwrap(self, fname):
    fun = getattr(self, fname)
    if fname in ('open', 'create'):
        # names grounded in the docstring: `open` and `create` get a
        # disposability flag added to the result
        def wrap(*a, **kw):
            res = fun(*a, **kw)
            if not res or type(res) == type(0):
                return res
            else:
                return (res, type(res) != FuseFileInfo)
    elif fname == 'utimens':  # name reconstructed from the signature below
        def wrap(path, acc_sec, acc_nsec, mod_sec, mod_nsec):
            ts_acc = Timespec(tv_sec=acc_sec, tv_nsec=acc_nsec)
            ts_mod = Timespec(tv_sec=mod_sec, tv_nsec=mod_nsec)
            return fun(path, ts_acc, ts_mod)
    else:
        wrap = fun
    return wrap
Wraps the fname method when the C code expects a different kind of callback than we have in the fusepy API. (The wrapper is usually for performing some checks or transformations which could be done in C but is simpler if done in Python.) Currently `open` and `create` are wrapped: a boolean flag is added which indicates if the result is to be kept during the opened file's lifetime or can be thrown away. Namely, it's considered disposable if it's an instance of FuseFileInfo.
383,240
def order_target_value(self, asset, target, limit_price=None,
                       stop_price=None, style=None):
    if not self._can_order_asset(asset):
        return None
    target_amount = self._calculate_order_value_amount(asset, target)
    amount = self._calculate_order_target_amount(asset, target_amount)
    return self.order(asset, amount,
                      limit_price=limit_price,
                      stop_price=stop_price,
                      style=style)
Place an order to adjust a position to a target value. If the position doesn't already exist, this is equivalent to placing a new order. If the position does exist, this is equivalent to placing an order for the difference between the target value and the current value. If the Asset being ordered is a Future, the 'target value' calculated is actually the target exposure, as Futures have no 'value'. Parameters ---------- asset : Asset The asset that this order is for. target : float The desired total value of ``asset``. limit_price : float, optional The limit price for the order. stop_price : float, optional The stop price for the order. style : ExecutionStyle The execution style for the order. Returns ------- order_id : str The unique identifier for this order. Notes ----- ``order_target_value`` does not take into account any open orders. For example: .. code-block:: python order_target_value(sid(0), 10) order_target_value(sid(0), 10) This code will result in 20 dollars of ``sid(0)`` because the first call to ``order_target_value`` will not have been filled when the second ``order_target_value`` call is made. See :func:`zipline.api.order` for more information about ``limit_price``, ``stop_price``, and ``style`` See Also -------- :class:`zipline.finance.execution.ExecutionStyle` :func:`zipline.api.order` :func:`zipline.api.order_target` :func:`zipline.api.order_target_percent`
383,241
def geojson_handler(geojson, hType='map'):
    # string literals reconstructed from the docstring: 'map' handles
    # polygons/multipolygons, 'mapline' linestrings/multilinestrings and
    # 'mappoint' points/multipoints; GeoJSON member names follow the spec
    hType_dict = {
        'map': ['polygon', 'multipolygon'],
        'mapline': ['linestring', 'multilinestring'],
        'mappoint': ['point', 'multipoint'],
    }
    oldlist = [x for x in geojson['features']
               if x['geometry']['type'].lower() in hType_dict[hType]]
    newlist = []
    for each_dict in oldlist:
        geojson_type = each_dict['geometry']['type'].lower()
        if hType == 'mapline':
            newlist.append(
                {'name': each_dict['properties'].get('name', None),
                 'path': _coordinates_to_path(each_dict['geometry']['coordinates'],
                                              hType, geojson_type),
                 'properties': each_dict['properties'],
                 }
            )
        elif hType == 'map':
            newlist.append(
                {'name': each_dict['properties']['name'],
                 'path': _coordinates_to_path(each_dict['geometry']['coordinates'],
                                              hType, geojson_type),
                 'properties': each_dict['properties'],
                 }
            )
        elif hType == 'mappoint':
            newlist.append(
                {'name': each_dict['properties']['name'],
                 'x': each_dict['geometry']['coordinates'][0],
                 'y': -each_dict['geometry']['coordinates'][1],
                 'properties': each_dict['properties'],
                 }
            )
    return newlist
Restructure a GeoJSON object in preparation to be added directly by add_map_data or add_data_set methods. The geojson will be broken down to fit a specific Highcharts (highmaps) type, either map, mapline or mappoint. Meta data in GeoJSON's properties object will be copied directly over to object['properties'] 1. geojson is the map data (GeoJSON) to be converted 2. hType is the type of highmap types. "map" will return GeoJSON polygons and multipolygons. "mapline" will return GeoJSON linestrings and multilinestrings. "mappoint" will return GeoJSON points and multipoints. default: "map"
383,242
def list_unique(cls):
    query = meta.Session.query(Predicate).distinct(Predicate.namespace)
    return query.all()
Return all unique namespaces :returns: a list of all predicates :rtype: list of ckan.model.semantictag.Predicate objects
383,243
def list_security_groups(call=None):
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_security_groups function must be called with -f or --function.'
        )
    # the middle of this function (fetching the security-group pool over
    # XML-RPC) was lost in extraction; reconstructed in outline form
    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])
    secgroup_pool = server.one.secgrouppool.info(auth, -2, -1, -1)[1]
    groups = {}
    for group in _get_xml(secgroup_pool):
        groups[group.find('NAME').text] = _xml_to_dict(group)
    return groups
Lists all security groups available to the user and the user's groups. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt-cloud -f list_security_groups opennebula
383,244
def __to_file(self, message_no):
    filename = self.__create_file_name(message_no)
    try:
        with codecs.open(filename, mode='w',
                         encoding=self.messages[message_no].encoding) as file__:
            file__.write(self.messages[message_no].output)
    except IOError as excep:
        # error messages reconstructed; the original literals were lost
        print('Unable to write to file \'{0}\':'.format(filename))
        print(excep)
        print('Exiting')
        sys.exit(2)
    return filename
Write a single message to file
383,245
def absolute_path(path=None, base_dir=None):
    if path_is_remote(path):
        return path
    else:
        if os.path.isabs(path):
            return path
        else:
            if base_dir is None or not os.path.isabs(base_dir):
                raise TypeError("base_dir must be an absolute path.")
            return os.path.abspath(os.path.join(base_dir, path))
Return absolute path if path is local. Parameters: ----------- path : path to file base_dir : base directory used for absolute path Returns: -------- absolute path
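Behaviour sketch for the function above (path_is_remote is a helper from the same module; the paths are hypothetical):

    print(absolute_path(path="data/file.tif", base_dir="/tmp/project"))
    # -> /tmp/project/data/file.tif
    print(absolute_path(path="/already/absolute.tif", base_dir="/tmp"))
    # -> /already/absolute.tif (absolute paths pass through)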
383,246
def start(self):
    if self.running:
        raise Exception()
    if self.io_loop is None:
        self.io_loop = tornado.ioloop.IOLoop.current()
    self.running = True
    answer = tornado.gen.Future()
    self._schedule_ad(0, answer)
    return answer
Starts the advertise loop. Returns the result of the first ad request.
383,247
def apply_handler_to_all_logs(handler: logging.Handler,
                              remove_existing: bool = False) -> None:
    for name, obj in logging.Logger.manager.loggerDict.items():
        if remove_existing:
            obj.handlers = []
        obj.addHandler(handler)
Applies a handler to all logs, optionally removing existing handlers. Should ONLY be called from the ``if __name__ == 'main'`` script; see https://docs.python.org/3.4/howto/logging.html#library-config. Generally MORE SENSIBLE just to apply a handler to the root logger. Args: handler: the handler to apply remove_existing: remove existing handlers from logger first?
383,248
def _save_or_update(self):
    with self._resource_lock:
        if not self._config or not self._config._storage_path:
            raise Exception("self._config._storage_path is undefined")
        if not self._config._base_name:
            raise Exception("self._config._base_name is undefined")
        if not os.path.exists(self._config._storage_path):
            os.makedirs(self._config._storage_path)
        path = self._get_cloud_provider_storage_path()
        with open(path, 'wb') as storage:  # binary mode for pickle output
            pickle.dump(self._config, storage, pickle.HIGHEST_PROTOCOL)
            pickle.dump(self._subscriptions, storage, pickle.HIGHEST_PROTOCOL)
Save or update the private state needed by the cloud provider.
383,249
def by_image_seq(blocks, image_seq):
    return list(filter(lambda block: blocks[block].ec_hdr.image_seq == image_seq,
                       blocks))
Filter blocks to return only those associated with the provided image_seq number. Argument: List:blocks -- List of block objects to sort. Int:image_seq -- image_seq number found in ec_hdr. Returns: List -- List of block indexes matching image_seq number.
383,250
def subscribe_to_events(config, subscriber, events, model=None):
    kwargs = {}
    if model is not None:
        kwargs['model'] = model  # key mirrors the parameter name
    for evt in events:
        config.add_subscriber(subscriber, evt, **kwargs)
Helper function to subscribe to group of events. :param config: Pyramid contig instance. :param subscriber: Event subscriber function. :param events: Sequence of events to subscribe to. :param model: Model predicate value.
383,251
def ssh_cmd(self, name, ssh_command):
    if not self.container_exists(name=name):
        exit("Unknown container {0}".format(name))
    if not self.container_running(name=name):
        exit("Container {0} is not running".format(name))
    ip = self.get_container_ip(name)
    if not ip:
        exit("Failed to get network address for "
             "container {0}".format(name))
    # the username/key literals were lost in extraction; 'root' below is a
    # placeholder
    if ssh_command:
        ssh.do_cmd('root', ip, 'root', " ".join(ssh_command))
    else:
        ssh.launch_shell('root', ip, 'root')
SSH into the given container and execute the command, if given.
383,252
def open_bucket(bucket_name,
                aws_access_key_id=None,
                aws_secret_access_key=None,
                aws_profile=None):
    session = boto3.session.Session(
        profile_name=aws_profile,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key)
    s3 = session.resource('s3')
    bucket = s3.Bucket(bucket_name)
    return bucket
Open an S3 Bucket resource. Parameters ---------- bucket_name : `str` Name of the S3 bucket. aws_access_key_id : `str`, optional The access key for your AWS account. Also set ``aws_secret_access_key``. aws_secret_access_key : `str`, optional The secret key for your AWS account. aws_profile : `str`, optional Name of AWS profile in :file:`~/.aws/credentials`. Use this instead of ``aws_access_key_id`` and ``aws_secret_access_key`` for file-based credentials. Returns ------- bucket : Boto3 S3 Bucket instance The S3 bucket as a Boto3 instance.
383,253
def detrend(arr, x=None, deg=5, tol=1e-3, maxloop=10):
    xx = numpy.arange(len(arr)) if x is None else x
    base = arr.copy()
    trend = base
    pol = numpy.ones((deg + 1,))
    for _ in range(maxloop):
        pol_new = numpy.polyfit(xx, base, deg)
        pol_norm = numpy.linalg.norm(pol)
        diff_pol_norm = numpy.linalg.norm(pol - pol_new)
        if diff_pol_norm / pol_norm < tol:
            break
        pol = pol_new
        trend = numpy.polyval(pol, xx)
        base = numpy.minimum(base, trend)
    return trend
Compute a baseline trend of a signal
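A synthetic check of the function above: a sine signal riding on a slow polynomial drift, with the returned trend tracking the drift from below:

    import numpy

    xx = numpy.linspace(0.0, 10.0, 500)
    signal = numpy.sin(5 * xx) + 0.1 * xx ** 2 + 0.5 * xx
    baseline = detrend(signal, x=xx, deg=3)
    flattened = signal - baseline  # drift largely removed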
383,254
def refresh(self):
    r = self._client.request('GET', self.url)  # method literal reconstructed
    return self._deserialize(r.json(), self._manager)
Refresh this model from the server. Updates attributes with the server-defined values. This is useful where the Model instance came from a partial response (eg. a list query) and additional details are required. Existing attribute values will be overwritten.
383,255
def update_host_datetime(host, username, password, protocol=None, port=None,
                         host_names=None):
    service_instance = salt.utils.vmware.get_service_instance(host=host,
                                                              username=username,
                                                              password=password,
                                                              protocol=protocol,
                                                              port=port)
    host_names = _check_hosts(service_instance, host, host_names)
    ret = {}
    for host_name in host_names:
        host_ref = _get_host_ref(service_instance, host, host_name=host_name)
        date_time_manager = _get_date_time_mgr(host_ref)
        try:
            date_time_manager.UpdateDateTime(datetime.datetime.utcnow())
        except vim.fault.HostConfigFault as err:
            # message format reconstructed; the original literal was lost
            msg = '\'vsphere.update_date_time\' failed for host {0}: {1}'.format(
                host_name, err)
            log.debug(msg)
            ret.update({host_name: {'Error': msg}})
            continue
        ret.update({host_name: {'Datetime Updated': True}})
    return ret
Update the date/time on the given host or list of host_names. This function should be used with caution since network delays and execution delays can result in time skews. host The location of the host. username The username used to login to the host, such as ``root``. password The password used to login to the host. protocol Optionally set to alternate protocol if the host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the host is not using the default port. Default port is ``443``. host_names List of ESXi host names. When the host, username, and password credentials are provided for a vCenter Server, the host_names argument is required to tell vCenter which hosts should update their date/time. If host_names is not provided, the date/time will be updated for the ``host`` location instead. This is useful for when service instance connection information is used for a single ESXi host. CLI Example: .. code-block:: bash # Used for single ESXi host connection information salt '*' vsphere.update_date_time my.esxi.host root bad-password # Used for connecting to a vCenter Server salt '*' vsphere.update_date_time my.vcenter.location root bad-password \ host_names='[esxi-1.host.com, esxi-2.host.com]'
383,256
def send_file(self, fp, headers=None, cb=None, num_cb=10,
              query_args=None, chunked_transfer=False, size=None):
    provider = self.bucket.connection.provider
    try:
        spos = fp.tell()
    except IOError:
        spos = None
        self.read_from_stream = False
    # NOTE: the original also bound ``m`` (an MD5 hasher, or None when the
    # digest is already known) and ``save_debug`` (the saved debug level)
    # before defining ``sender``; that setup was lost in extraction.

    def sender(http_conn, method, path, data, headers):
        # rewind the file pointer if possible; a raw stream can only be
        # read once
        if spos is not None and spos != fp.tell():
            fp.seek(spos)
        elif spos is None and self.read_from_stream:
            # body reconstructed: a stream cannot be replayed on retry
            raise provider.storage_data_error(
                'Cannot retry failed request: fp does not support seeking')
        if getattr(http_conn, 'debuglevel', 0) < 3:
            http_conn.set_debuglevel(0)
        data_len = 0
        if cb:
            if size:
                cb_size = size
            elif self.size:
                cb_size = self.size
            else:
                cb_size = 0
            if chunked_transfer and cb_size == 0:
                # for chunked transfer of unknown size, fire the callback
                # roughly every MB
                cb_count = (1024 * 1024) / self.BufferSize
            elif num_cb > 1:
                cb_count = int(math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
            elif num_cb < 0:
                cb_count = -1
            else:
                cb_count = 0
            i = 0
            cb(data_len, cb_size)
        bytes_togo = size
        if bytes_togo and bytes_togo < self.BufferSize:
            chunk = fp.read(bytes_togo)
        else:
            chunk = fp.read(self.BufferSize)
        if spos is None:
            self.read_from_stream = True
        while chunk:
            chunk_len = len(chunk)
            data_len += chunk_len
            if chunked_transfer:
                # standard HTTP chunked encoding framing
                http_conn.send('%x;\r\n' % chunk_len)
                http_conn.send(chunk)
                http_conn.send('\r\n')
            else:
                http_conn.send(chunk)
            if m:
                m.update(chunk)
            if bytes_togo:
                bytes_togo -= chunk_len
                if bytes_togo <= 0:
                    break
            if cb:
                i += 1
                if i == cb_count or cb_count == -1:
                    cb(data_len, cb_size)
                    i = 0
            if bytes_togo and bytes_togo < self.BufferSize:
                chunk = fp.read(bytes_togo)
            else:
                chunk = fp.read(self.BufferSize)
        self.size = data_len
        if chunked_transfer:
            http_conn.send('0\r\n')
            if m:
                hd = m.hexdigest()
                self.md5, self.base64md5 = self.get_md5_from_hexdigest(hd)
            http_conn.send('\r\n')
        if cb and (cb_count <= 1 or i > 0) and data_len > 0:
            cb(data_len, cb_size)
        response = http_conn.getresponse()
        body = response.read()
        http_conn.set_debuglevel(save_debug)
        self.bucket.connection.debug = save_debug
        if ((response.status == 500 or response.status == 503 or
                response.getheader('location')) and not chunked_transfer):
            # retryable failure; the original retry/validation logic here
            # was lost in extraction
            return response

    # call head reconstructed; only its trailing arguments survived
    resp = self.bucket.connection.make_request(
        'PUT', self.bucket.name, self.name, headers,
        sender=sender, query_args=query_args)
    self.handle_version_headers(resp, force=True)
Upload a file to a key into a bucket on S3. :type fp: file :param fp: The file pointer to upload. The file pointer must point at the offset from which you wish to upload. ie. if uploading the full file, it should point at the start of the file. Normally when a file is opened for reading, the fp will point at the first byte. See the bytes parameter below for more info. :type headers: dict :param headers: The headers to pass along with the PUT request :type cb: function :param cb: a callback function that will be called to report progress on the upload. The callback should accept two integer parameters, the first representing the number of bytes that have been successfully transmitted to S3 and the second representing the size of the to be transmitted object. :type num_cb: int :param num_cb: (optional) If a callback is specified with the cb parameter this parameter determines the granularity of the callback by defining the maximum number of times the callback will be called during the file transfer. Providing a negative integer will cause your callback to be called with each buffer read. :type size: int :param size: (optional) The Maximum number of bytes to read from the file pointer (fp). This is useful when uploading a file in multiple parts where you are splitting the file up into different ranges to be uploaded. If not specified, the default behaviour is to read all bytes from the file pointer. Less bytes may be available.
383,257
def _list_metric_descriptors(args, _):
    # the argument keys and the default pattern were lost in extraction;
    # the names below are reconstructed placeholders
    project_id = args['project']
    pattern = args['type'] or '*'
    descriptors = gcm.MetricDescriptors(project_id=project_id)
    dataframe = descriptors.as_dataframe(pattern=pattern)
    return _render_dataframe(dataframe)
Lists the metric descriptors in the project.
383,258
def bootstrap_paginate(parser, token):
    bits = token.split_contents()
    if len(bits) < 2:
        raise TemplateSyntaxError("'%s' takes at least one argument"
                                  " (Page object reference)" % bits[0])
    page = parser.compile_filter(bits[1])
    kwargs = {}
    bits = bits[2:]
    kwarg_re = re.compile(r'(\w+)=(.+)')  # pattern reconstructed from usage
    if len(bits):
        for bit in bits:
            match = kwarg_re.match(bit)
            if not match:
                raise TemplateSyntaxError("Malformed arguments to "
                                          "bootstrap_pagination paginate tag")
            name, value = match.groups()
            kwargs[name] = parser.compile_filter(value)
    return BootstrapPaginationNode(page, kwargs)
Renders a Page object as a Twitter Bootstrap styled pagination bar. Compatible with Bootstrap 3.x and 4.x only. Example:: {% bootstrap_paginate page_obj range=10 %} Named Parameters:: range - The size of the pagination bar (ie, if set to 10 then, at most, 10 page numbers will display at any given time) Defaults to None, which shows all pages. size - Accepts "small", and "large". Defaults to None which is the standard size. show_prev_next - Accepts "true" or "false". Determines whether or not to show the previous and next page links. Defaults to "true" show_first_last - Accepts "true" or "false". Determines whether or not to show the first and last page links. Defaults to "false" previous_label - The text to display for the previous page link. Defaults to "&larr;" next_label - The text to display for the next page link. Defaults to "&rarr;" first_label - The text to display for the first page link. Defaults to "&laquo;" last_label - The text to display for the last page link. Defaults to "&raquo;" url_view_name - The named URL to use. Defaults to None. If None, then the default template simply appends the url parameter as a relative URL link, eg: <a href="?page=1">1</a> url_param_name - The name of the parameter to use in the URL. If url_view_name is set to None, this string is used as the parameter name in the relative URL path. If a URL name is specified, this string is used as the parameter name passed into the reverse() method for the URL. url_extra_args - This is used only in conjunction with url_view_name. When referencing a URL, additional arguments may be passed in as a list. url_extra_kwargs - This is used only in conjunction with url_view_name. When referencing a URL, additional named arguments may be passed in as a dictionary. url_get_params - The other get parameters to pass, only the page number will be overwritten. Use this to preserve filters. url_anchor - The anchor to use in URLs. Defaults to None. extra_pagination_classes - A space separated list of CSS class names that will be added to the top level <ul> HTML element. In particular, this can be utilized in Bootstrap 4 installations to add the appropriate alignment classes from Flexbox utilities, eg: justify-content-center
383,259
def get_countries(is_legacy_xml=False):
    countries = {}

    # data directory, relative to the script or a frozen executable
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
        data_dir = path.dirname(sys.executable)
    else:
        data_dir = path.dirname(__file__)

    if is_legacy_xml:
        # the XML file name is grounded in the docstring
        # (iso_3166-1_list_en.xml); tag names and encodings are reconstructed
        log.debug('Opening country code legacy XML: {0}'.format(
            str(data_dir) + '/data/iso_3166-1_list_en.xml'))
        f = io.open(str(data_dir) + '/data/iso_3166-1_list_en.xml',
                    'r', encoding='ISO-8859-1')
        data = f.read()
        if not data:
            return {}
        dom = parseString(data)
        entries = dom.getElementsByTagName('ISO_3166-1_Entry')
        for entry in entries:
            code = entry.getElementsByTagName(
                'ISO_3166-1_Alpha-2_Code_element')[0].firstChild.data
            name = entry.getElementsByTagName(
                'ISO_3166-1_Country_name')[0].firstChild.data
            countries[code] = name.title()
    else:
        log.debug('Opening country code CSV: {0}'.format(
            str(data_dir) + '/data/iso_3166-1.csv'))
        f = io.open(str(data_dir) + '/data/iso_3166-1.csv',
                    'r', encoding='utf-8')
        csv_reader = csv.reader(f, delimiter=',', quotechar='"')
        for row in csv_reader:
            code = row[0]
            name = row[1]
            countries[code] = name
    return countries
The function to generate a dictionary containing ISO_3166-1 country codes to names. Args: is_legacy_xml (:obj:`bool`): Whether to use the older country code list (iso_3166-1_list_en.xml). Returns: dict: A mapping of country codes as the keys to the country names as the values.
383,260
def get_romfile_path(game, inttype=Integrations.DEFAULT):
    for extension in EMU_EXTENSIONS.keys():
        possible_path = get_file_path(game, "rom" + extension, inttype)
        if possible_path:
            return possible_path
    raise FileNotFoundError("No romfiles found for game: %s" % game)
Return the path to a given game's romfile
383,261
def mac_address_table_static_mac_address(self, **kwargs):
    config = ET.Element("config")
    mac_address_table = ET.SubElement(
        config, "mac-address-table",
        xmlns="urn:brocade.com:mgmt:brocade-mac-address-table")
    static = ET.SubElement(mac_address_table, "static")
    # kwargs keys mirror the element names; literals reconstructed
    forward_key = ET.SubElement(static, "forward")
    forward_key.text = kwargs.pop('forward')
    interface_type_key = ET.SubElement(static, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name_key = ET.SubElement(static, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    vlan_key = ET.SubElement(static, "vlan")
    vlan_key.text = kwargs.pop('vlan')
    vlanid_key = ET.SubElement(static, "vlanid")
    vlanid_key.text = kwargs.pop('vlanid')
    mac_address = ET.SubElement(static, "mac-address")
    mac_address.text = kwargs.pop('mac_address')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
383,262
def send_wrapped(self, text):
    lines = word_wrap(text, self.columns)
    for line in lines:
        self.send_cc(line + '\n')
Send text padded and wrapped to the user's screen width.
383,263
def date_this_century(self, before_today=True, after_today=False):
    today = date.today()
    this_century_start = date(today.year - (today.year % 100), 1, 1)
    next_century_start = date(this_century_start.year + 100, 1, 1)
    if before_today and after_today:
        return self.date_between_dates(this_century_start, next_century_start)
    elif not before_today and after_today:
        return self.date_between_dates(today, next_century_start)
    elif not after_today and before_today:
        return self.date_between_dates(this_century_start, today)
    else:
        return today
Gets a Date object for the current century. :param before_today: include days in current century before today :param after_today: include days in current century after today :example Date('2012-04-04') :return Date
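A usage sketch; this method appears to come from Faker's date provider, so assuming a standard Faker instance:

    from faker import Faker

    fake = Faker()
    print(fake.date_this_century())                  # a past day this century (default)
    print(fake.date_this_century(before_today=True,
                                 after_today=True))  # any day this century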
383,264
def char(self, c: str) -> None:
    if self.peek() == c:
        self.offset += 1
    else:
        raise UnexpectedInput(self, f"char {c!r}")  # f-string placeholder restored
Parse the specified character. Args: c: One-character string. Raises: EndOfInput: If past the end of `self.input`. UnexpectedInput: If the next character is different from `c`.
383,265
def import_locations(self, gpx_file):
    self._gpx_file = gpx_file
    data = utils.prepare_xml_read(gpx_file, objectify=True)
    try:
        self.metadata.import_metadata(data.metadata)
    except AttributeError:
        pass
    for waypoint in data.wpt:
        # attribute names grounded in the GPX sample in the docstring
        latitude = waypoint.get('lat')
        longitude = waypoint.get('lon')
        try:
            name = waypoint.name.text
        except AttributeError:
            name = None
        try:
            description = waypoint.desc.text
        except AttributeError:
            description = None
        try:
            elevation = float(waypoint.ele.text)
        except AttributeError:
            elevation = None
        try:
            time = utils.Timestamp.parse_isoformat(waypoint.time.text)
        except AttributeError:
            time = None
        self.append(Waypoint(latitude, longitude, name, description,
                             elevation, time))
Import GPX data files. ``import_locations()`` returns a list with :class:`~gpx.Waypoint` objects. It expects data files in GPX format, as specified in `GPX 1.1 Schema Documentation`_, which is XML such as:: <?xml version="1.0" encoding="utf-8" standalone="no"?> <gpx version="1.1" creator="PocketGPSWorld.com" xmlns="http://www.topografix.com/GPX/1/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd"> <wpt lat="52.015" lon="-0.221"> <name>Home</name> <desc>My place</desc> </wpt> <wpt lat="52.167" lon="0.390"> <name>MSR</name> <desc>Microsoft Research, Cambridge</desc> </wpt> </gpx> The reader uses the :mod:`ElementTree` module, so should be very fast when importing data. The above file processed by ``import_locations()`` will return the following ``list`` object:: [Waypoint(52.015, -0.221, "Home", "My place"), Waypoint(52.167, 0.390, "MSR", "Microsoft Research, Cambridge")] Args: gpx_file (iter): GPX data to read Returns: list: Locations with optional comments .. _GPX 1.1 Schema Documentation: http://www.topografix.com/GPX/1/1/
383,266
def is_reversible(P):
    import msmtools.analysis as msmana
    sets = connected_sets(P, strong=False)
    for s in sets:
        Ps = P[s, :][:, s]
        if not msmana.is_transition_matrix(Ps):
            return False
        pi = msmana.stationary_distribution(Ps)
        # detailed balance: pi_i * P_ij must equal pi_j * P_ji
        X = pi[:, None] * Ps
        if not np.allclose(X, X.T):
            return False
    return True
Returns if P is reversible on its weakly connected sets
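Two small checks against the function above (requires msmtools and the module's connected_sets helper):

    import numpy as np

    # every irreducible 2-state chain satisfies detailed balance
    P2 = np.array([[0.9, 0.1],
                   [0.2, 0.8]])
    print(is_reversible(P2))  # True

    # a 3-state cycle with a directional bias does not
    P3 = np.array([[0.0, 0.9, 0.1],
                   [0.1, 0.0, 0.9],
                   [0.9, 0.1, 0.0]])
    print(is_reversible(P3))  # False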
383,267
def get_operation_pattern(server_url, request_url_pattern):
    if server_url[-1] == "/":
        server_url = server_url[:-1]
    if is_absolute(server_url):
        return request_url_pattern.replace(server_url, "", 1)
    return path_qs(request_url_pattern).replace(server_url, "", 1)
Return an updated request URL pattern with the server URL removed.
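Behaviour sketch for the function above (is_absolute and path_qs are helpers from the same module; the URLs are hypothetical):

    print(get_operation_pattern("https://api.example.com/v1",
                                "https://api.example.com/v1/pets/{petId}"))
    # -> /pets/{petId}
    print(get_operation_pattern("/v1/", "/v1/pets"))
    # -> /pets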
383,268
def create_mon_path(path, uid=-1, gid=-1):
    if not os.path.exists(path):
        os.makedirs(path)
        os.chown(path, uid, gid)
create the mon path if it does not exist
383,269
def get_first_comments_or_remarks(recID=-1,
                                  ln=CFG_SITE_LANG,
                                  nb_comments='all',
                                  nb_reviews='all',
                                  voted=-1,
                                  reported=-1,
                                  user_info=None,
                                  show_reviews=False):
    # the string literals (defaults, wash types and warning messages) were
    # lost in extraction and are reconstructed below
    _ = gettext_set_language(ln)
    warnings = []
    voted = wash_url_argument(voted, 'int')
    reported = wash_url_argument(reported, 'int')
    if not isinstance(recID, int):
        return ()
    if recID >= 1:
        if CFG_WEBCOMMENT_ALLOW_REVIEWS:
            res_reviews = query_retrieve_comments_or_remarks(
                recID=recID, display_order="hh", ranking=1,
                limit=nb_comments, user_info=user_info)
            nb_res_reviews = len(res_reviews)
            if isinstance(nb_reviews, int) and nb_reviews < len(res_reviews):
                first_res_reviews = res_reviews[:nb_reviews]
            else:
                first_res_reviews = res_reviews
        if CFG_WEBCOMMENT_ALLOW_COMMENTS:
            res_comments = query_retrieve_comments_or_remarks(
                recID=recID, display_order="od", ranking=0,
                limit=nb_reviews, user_info=user_info)
            nb_res_comments = len(res_comments)
            if isinstance(nb_comments, int) and nb_comments < len(res_comments):
                first_res_comments = res_comments[:nb_comments]
            else:
                first_res_comments = res_comments
    else:
        try:
            raise InvenioWebCommentError(
                _('%(recid)s is an invalid record ID', recid=recID))
        except InvenioWebCommentError as exc:
            register_exception()
            body = webcomment_templates.tmpl_error(exc.message, ln)
            return body
    if recID >= 1:
        comments = reviews = ""
        if reported > 0:
            try:
                raise InvenioWebCommentWarning(
                    _('Your report has been sent.'))
            except InvenioWebCommentWarning as exc:
                register_exception(stream='warning')
                warnings.append((exc.message, ''))
        elif reported == 0:
            try:
                raise InvenioWebCommentWarning(
                    _('Your report could not be sent.'))
            except InvenioWebCommentWarning as exc:
                register_exception(stream='warning')
                warnings.append((exc.message, ''))
        if CFG_WEBCOMMENT_ALLOW_COMMENTS:
            grouped_comments = group_comments_by_round(
                first_res_comments, ranking=0)
            comments = webcomment_templates.tmpl_get_first_comments_without_ranking(
                recID, ln, grouped_comments, nb_res_comments, warnings)
        if show_reviews:
            if CFG_WEBCOMMENT_ALLOW_REVIEWS:
                avg_score = calculate_avg_score(res_reviews)
                if voted > 0:
                    try:
                        raise InvenioWebCommentWarning(
                            _('Your vote has been recorded.'))
                    except InvenioWebCommentWarning as exc:
                        register_exception(stream='warning')
                        warnings.append((exc.message, ''))
                elif voted == 0:
                    try:
                        raise InvenioWebCommentWarning(
                            _('Your vote could not be recorded.'))
                    except InvenioWebCommentWarning as exc:
                        register_exception(stream='warning')
                        warnings.append((exc.message, ''))
                grouped_reviews = group_comments_by_round(
                    first_res_reviews, ranking=0)
                reviews = webcomment_templates.tmpl_get_first_comments_with_ranking(
                    recID, ln, grouped_reviews, nb_res_reviews, avg_score,
                    warnings)
        return (comments, reviews)
    else:
        return (webcomment_templates.tmpl_get_first_remarks(
            first_res_comments, ln, nb_res_comments), None)
Gets nb number comments/reviews or remarks. In the case of comments, will get both comments and reviews. Comments and remarks sorted by most recent date, reviews sorted by highest helpful score :param recID: record id :param ln: language :param nb_comments: number of comments or remarks to get :param nb_reviews: number of reviews or remarks to get :param voted: 1 if user has voted for a remark :param reported: 1 if user has reported a comment or review :return: if comment, tuple (comments, reviews) both being html of first nb comments/reviews if remark, tuple (remarks, None)
383,270
async def set_failover_mode(mode):
    jar = aiohttp.CookieJar(unsafe=True)
    websession = aiohttp.ClientSession(cookie_jar=jar)
    try:
        modem = eternalegypt.Modem(hostname=sys.argv[1], websession=websession)
        await modem.login(password=sys.argv[2])
        await modem.set_failover_mode(mode)
        await modem.logout()
    except eternalegypt.Error:
        print("Could not login")
    await websession.close()
Example of setting the failover mode on the modem.
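A hypothetical entry point for running the coroutine from the command line (the argument order here is an assumption: hostname, password, mode):

import asyncio
import sys

# Assumed usage: python failover.py <hostname> <password> <mode>
if __name__ == "__main__":
    asyncio.run(set_failover_mode(sys.argv[3]))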
383,271
def seektime(self, disk):
    args = {
        'disk': disk,
    }
    self._seektime_chk.check(args)
    return self._client.json("disk.seektime", args)
Gives seek latency on a disk, which is a very good indication of the disk's type. It's a reliable way to verify whether the underlying disk is an SSD or an HDD.

:param disk: disk path or name (/dev/sda, or sda)
:return: a dict as follows {'device': '<device-path>', 'elapsed': <seek time in us>, 'type': '<SSD or HDD>'}
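For intuition, a self-contained sketch of the underlying measurement (this is not the library's implementation; a crude probe that times random small reads):

import os
import random
import time

def measure_seek_time_us(path, samples=64, block=512):
    # Crude seek-latency probe: time random small reads across the device.
    # Caveat: the OS page cache can mask real seeks, so treat the result as
    # a rough indicator only.
    fd = os.open(path, os.O_RDONLY)
    try:
        size = os.lseek(fd, 0, os.SEEK_END)
        start = time.perf_counter()
        for _ in range(samples):
            os.lseek(fd, random.randrange(0, max(size - block, 1)), os.SEEK_SET)
            os.read(fd, block)
        return (time.perf_counter() - start) * 1e6 / samples
    finally:
        os.close(fd)

# Sub-millisecond averages usually point at an SSD, multi-millisecond at an HDD.
# print(measure_seek_time_us('/dev/sda'))  # typically requires root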
383,272
import numpy as np
import scipy.sparse
import scipy.linalg as scl

def bootstrapping_dtrajs(dtrajs, lag, N_full, nbs=10000, active_set=None):
    Q = len(dtrajs)
    if active_set is not None:
        N = active_set.size
    else:
        N = N_full
    # Build one sparse row of transition-pair counts per trajectory.
    traj_ind = []
    state1 = []
    state2 = []
    q = 0
    for traj in dtrajs:
        traj_ind.append(q * np.ones(traj[:-lag].size))
        state1.append(traj[:-lag])
        state2.append(traj[lag:])
        q += 1
    traj_inds = np.concatenate(traj_ind)
    pairs = N_full * np.concatenate(state1) + np.concatenate(state2)
    data = np.ones(pairs.size)
    Ct_traj = scipy.sparse.coo_matrix((data, (traj_inds, pairs)),
                                      shape=(Q, N_full * N_full))
    Ct_traj = Ct_traj.tocsr()
    # Resample trajectories with replacement and collect singular values
    # of the resulting count matrices.
    svals = np.zeros((nbs, N))
    for s in range(nbs):
        sel = np.random.choice(Q, Q, replace=True)
        Ct_sel = Ct_traj[sel, :].sum(axis=0)
        Ct_sel = np.asarray(Ct_sel).reshape((N_full, N_full))
        if active_set is not None:
            from pyemma.util.linalg import submatrix
            Ct_sel = submatrix(Ct_sel, active_set)
        svals[s, :] = scl.svdvals(Ct_sel)
    smean = np.mean(svals, axis=0)
    sdev = np.std(svals, axis=0)
    return smean, sdev
Perform trajectory based re-sampling. Parameters ---------- dtrajs : list of discrete trajectories lag : int lag time N_full : int Number of states in discrete trajectories. nbs : int, optional Number of bootstrapping samples active_set : ndarray Indices of active set, all count matrices will be restricted to active set. Returns ------- smean : ndarray(N,) mean values of singular values sdev : ndarray(N,) standard deviations of singular values
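A quick usage sketch on synthetic three-state trajectories (nbs is lowered from the default so it runs fast; the data here is illustrative only):

import numpy as np

np.random.seed(0)
dtrajs = [np.random.randint(0, 3, size=500) for _ in range(5)]
smean, sdev = bootstrapping_dtrajs(dtrajs, lag=10, N_full=3, nbs=200)
print(smean)  # mean singular values of the resampled count matrices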
383,273
def file_md5(self, resource):
    warnings.warn(
        "file_md5 is deprecated; use resource_md5 instead",
        DeprecationWarning, stacklevel=2)
    return self.resource_md5(resource)
Deprecated alias for *resource_md5*.
383,274
def enterEvent(self, event):
    super(XViewPanelItem, self).enterEvent(event)
    self._hovered = True
    self.update()
Mark the hovered state as being true. :param event | <QtCore.QEnterEvent>
383,275
def reboot(name, call=None):
    # NOTE: the guard below follows the standard salt-cloud action pattern;
    # its exact message was lost in extraction.
    if call != 'action':
        raise SaltCloudSystemExit(
            'The reboot action must be called with -a or --action.')
    datacenter_id = get_datacenter_id()
    conn = get_conn()
    node = get_node(conn, name)
    conn.reboot_server(datacenter_id=datacenter_id, server_id=node['id'])
    return True
reboot a machine by name :param name: name given to the machine :param call: call value in this case is 'action' :return: true if successful CLI Example: .. code-block:: bash salt-cloud -a reboot vm_name
383,276
def _get(self, uri, params=None, headers=None):
    if not headers:
        headers = self._get_headers()
    logging.debug("URI=" + str(uri))
    logging.debug("HEADERS=" + str(headers))
    response = self.session.get(uri, headers=headers, params=params)
    logging.debug("STATUS=" + str(response.status_code))
    if response.status_code == 200:
        return response.json()
    else:
        logging.error(b"ERROR=" + response.content)
        response.raise_for_status()
Simple GET request for a given path.
383,277
def create_install_template_skin(self):
    ckan_extension_template(self.name, self.target)
    # NOTE: the package name pattern below is a reconstruction; the original
    # string literals were lost in extraction.
    self.install_package_develop('ckanext-' + self.name + 'theme')
Create an example ckan extension for this environment and install it
383,278
def to_xdr_object(self):
    selling = self.selling.to_xdr_object()
    buying = self.buying.to_xdr_object()
    price = Operation.to_xdr_price(self.price)
    price = Xdr.types.Price(price['n'], price['d'])
    amount = Operation.to_xdr_amount(self.amount)
    create_passive_offer_op = Xdr.types.CreatePassiveOfferOp(
        selling, buying, amount, price)
    self.body.type = Xdr.const.CREATE_PASSIVE_OFFER
    self.body.createPassiveOfferOp = create_passive_offer_op
    return super(CreatePassiveOffer, self).to_xdr_object()
Creates an XDR Operation object that represents this :class:`CreatePassiveOffer`.
383,279
def email_domain_disposable(value):
    domain = helpers.get_domain_from_email_address(value)
    if domain.lower() in disposable_domains:
        raise ValidationError(MESSAGE_USE_COMPANY_EMAIL)
Confirms that the email address is not using a disposable service.

@param {str} value
@returns {None}
@raises ValidationError
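A self-contained sketch of the same check; the domain set and helper name below are placeholders, not the library's actual data:

DISPOSABLE_DOMAINS = {'mailinator.com', 'trashmail.com'}  # placeholder list

def is_disposable(address):
    # Compare the part after the last '@' case-insensitively.
    domain = address.rsplit('@', 1)[-1]
    return domain.lower() in DISPOSABLE_DOMAINS

assert is_disposable('alice@Mailinator.com')
assert not is_disposable('bob@example.com')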
383,280
def get_tags(self):
    j, _ = self.datacenter.request('GET', self.path + '/tags')
    return j
:: GET /:login/machines/:id/tags :Returns: complete set of tags for this machine :rtype: :py:class:`dict` A local copy is not kept because these are essentially search keys.
383,281
def delete_location(self, location_name):
    location = self.find_by_name(location_name, self.locations)
    if not location:
        return False
    sites = location.sites
    self.locations.remove(location)
    for site in sites:
        if site:
            site.location = ''
    del location
    return sites
Remove location with name location_name from self.locations. If the location had any sites, change site.location to "".
383,282
def get(self, remote, local=None):
    if isinstance(local, file_type):   # open file object: write, leave open
        local_file = local
    elif local is None:                # no target: buffer and return contents
        local_file = buffer_type()
    else:                              # path string: open, write, close
        local_file = open(local, 'wb')
    self.conn.retrbinary("RETR %s" % remote, local_file.write)
    if isinstance(local, file_type):
        pass
    elif local is None:
        contents = local_file.getvalue()
        local_file.close()
        return contents
    else:
        local_file.close()
        return None
Gets the file from FTP server local can be: a file: opened for writing, left open a string: path to output file None: contents are returned
383,283
def expiry_time(ns, cavs):
    prefix = ns.resolve(STD_NAMESPACE)
    time_before_cond = condition_with_prefix(
        prefix, COND_TIME_BEFORE)
    t = None
    for cav in cavs:
        if not cav.first_party():
            continue
        cav = cav.caveat_id_bytes.decode()
        name, rest = parse_caveat(cav)
        if name != time_before_cond:
            continue
        try:
            et = pyrfc3339.parse(rest, utc=True).replace(tzinfo=None)
            if t is None or et < t:
                t = et
        except ValueError:
            continue
    return t
Returns the minimum time of any time-before caveats found in the given list, or None if no such caveats were found.

:param ns: used to determine the standard namespace prefix - if the standard namespace is not found, the empty prefix is assumed.
:param cavs: a list of pymacaroons.Caveat
:return: datetime.DateTime or None.
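For reference, a std time-before caveat carries an RFC 3339 timestamp after the condition name; a hedged sketch of parsing one such caveat (pyrfc3339 assumed installed, caveat string illustrative):

import pyrfc3339

# "time-before 2024-01-02T15:04:05Z" splits into a condition name and a
# timestamp; the earliest such timestamp across all caveats wins.
name, rest = 'time-before', '2024-01-02T15:04:05Z'
et = pyrfc3339.parse(rest, utc=True).replace(tzinfo=None)
print(name, et)  # time-before 2024-01-02 15:04:05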
383,284
def Enumerate():
    hid_guid = GUID()
    hid.HidD_GetHidGuid(ctypes.byref(hid_guid))
    devices = setupapi.SetupDiGetClassDevsA(
        ctypes.byref(hid_guid), None, None, 0x12)
    index = 0
    interface_info = DeviceInterfaceData()
    interface_info.cbSize = ctypes.sizeof(DeviceInterfaceData)
    out = []
    while True:
        result = setupapi.SetupDiEnumDeviceInterfaces(
            devices, 0, ctypes.byref(hid_guid), index,
            ctypes.byref(interface_info))
        index += 1
        if not result:
            break
        detail_len = wintypes.DWORD()
        result = setupapi.SetupDiGetDeviceInterfaceDetailA(
            devices, ctypes.byref(interface_info), None, 0,
            ctypes.byref(detail_len), None)
        detail_len = detail_len.value
        if detail_len == 0:
            continue
        buf = ctypes.create_string_buffer(detail_len)
        interface_detail = DeviceInterfaceDetailData.from_buffer(buf)
        interface_detail.cbSize = ctypes.sizeof(DeviceInterfaceDetailData)
        result = setupapi.SetupDiGetDeviceInterfaceDetailA(
            devices, ctypes.byref(interface_info),
            ctypes.byref(interface_detail), detail_len, None, None)
        if not result:
            raise ctypes.WinError()
        descriptor = base.DeviceDescriptor()
        path_len = detail_len - ctypes.sizeof(wintypes.DWORD)
        descriptor.path = ctypes.string_at(
            ctypes.addressof(interface_detail.DevicePath), path_len)
        device = None
        try:
            device = OpenDevice(descriptor.path, True)
        except WindowsError as e:
            if e.winerror == ERROR_ACCESS_DENIED:
                continue
            else:
                raise e
        try:
            FillDeviceAttributes(device, descriptor)
            FillDeviceCapabilities(device, descriptor)
            out.append(descriptor.ToPublicDict())
        finally:
            kernel32.CloseHandle(device)
    return out
See base class.
383,285
def _get_socket_addresses(self):
    family = socket.AF_UNSPEC
    if not socket.has_ipv6:
        family = socket.AF_INET
    try:
        addresses = socket.getaddrinfo(self._parameters['hostname'],
                                       self._parameters['port'],
                                       family, socket.SOCK_STREAM)
    except socket.gaierror as why:
        raise AMQPConnectionError(why)
    return addresses
Get Socket address information. :rtype: list
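The underlying resolution step, shown standalone (hostname and port here are just examples):

import socket

# AF_UNSPEC returns both IPv4 and IPv6 candidates when available.
addresses = socket.getaddrinfo('localhost', 5672,
                               socket.AF_UNSPEC, socket.SOCK_STREAM)
for family, socktype, proto, canonname, sockaddr in addresses:
    print(family, sockaddr)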
383,286
def _compose_chapters(self):
    for count in range(self.chapter_count):
        chapter_num = count + 1
        c = Chapter(self.markov, chapter_num)
        self.chapters.append(c)
Creates chapters and appends them to the chapters list.
383,287
def iter_markers(self):
    marker_finder = _MarkerFinder.from_stream(self._stream)
    start = 0
    marker_code = None
    while marker_code != JPEG_MARKER_CODE.EOI:
        marker_code, segment_offset = marker_finder.next(start)
        marker = _MarkerFactory(
            marker_code, self._stream, segment_offset
        )
        yield marker
        start = segment_offset + marker.segment_length
Generate a marker object for each marker in the JPEG *stream*, in the order they occur in the stream.
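A minimal standalone sketch of the marker-scanning idea (assumed byte layout: a marker is 0xFF followed by a nonzero, non-0xFF code byte):

data = bytes.fromhex('ffd8ffe000104a464946')  # SOI marker, then start of APP0
codes = [data[i + 1] for i in range(len(data) - 1)
         if data[i] == 0xFF and data[i + 1] not in (0x00, 0xFF)]
print([hex(c) for c in codes])  # ['0xd8', '0xe0']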
383,288
def _deep_value(*args, **kwargs):
    node, keys = args[0], args[1:]
    for key in keys:
        node = node.get(key, {})
    default = kwargs.get('default', {})
    if node in ({}, [], None):
        node = default
    return node
Drills down into tree using the keys
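Quick usage check:

tree = {'a': {'b': {'c': 42}}}
assert _deep_value(tree, 'a', 'b', 'c') == 42
assert _deep_value(tree, 'a', 'missing', default='n/a') == 'n/a'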
383,289
def row_contributions(self, X):
    # NOTE: the check_is_fitted arguments were lost in extraction; the
    # single-argument form used here is a reconstruction.
    utils.validation.check_is_fitted(self)
    return np.square(self.row_coordinates(X)).div(self.eigenvalues_,
                                                  axis='columns')
Returns the row contributions towards each principal component. Each row contribution towards each principal component is equivalent to the amount of inertia it contributes. This is calculated by dividing the squared row coordinates by the eigenvalue associated to each principal component.
383,290
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    tz = pytz.timezone(zone)
    if utcoffset is None:
        return tz
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    try:
        return tz._tzinfos[(utcoffset, dstoffset, tzname)]
    except KeyError:
        pass
    # No exact cached match: fall back to any tzinfo with matching offsets,
    # then construct a new localized tzinfo as a last resort.
    for localized_tz in tz._tzinfos.values():
        if (localized_tz._utcoffset == utcoffset
                and localized_tz._dst == dstoffset):
            return localized_tz
    inf = (utcoffset, dstoffset, tzname)
    tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
    return tz._tzinfos[inf]
Factory function for unpickling pytz tzinfo instances. This is shared for both StaticTzInfo and DstTzInfo instances, because database changes could cause a zones implementation to switch between these two base classes and we can't break pickles on a pytz version upgrade.
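A round-trip check that exercises this unpickling path (pytz assumed installed):

import pickle
from datetime import datetime
import pytz

dt = pytz.timezone('Europe/Rome').localize(datetime(2020, 6, 1, 12, 0))
restored = pickle.loads(pickle.dumps(dt))
assert restored == dt and restored.tzinfo.zone == 'Europe/Rome'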
383,291
def save(self, filename, format=None):
    if format is None:
        if filename.endswith(('.csv', '.csv.gz')):
            format = 'csv'
        elif filename.endswith('.json'):
            format = 'json'
        else:
            format = 'binary'
    else:
        if format == 'csv':
            if not filename.endswith(('.csv', '.csv.gz')):
                filename = filename + '.csv'
        elif format != 'binary' and format != 'json':
            raise ValueError("Invalid format: {}. Supported formats are "
                             "'csv', 'json' and 'binary'".format(format))
    url = _make_internal_url(filename)
    with cython_context():
        if format == 'binary':
            self.__proxy__.save(url)
        elif format == 'csv':
            assert filename.endswith(('.csv', '.csv.gz'))
            self.__proxy__.save_as_csv(url, {})
        elif format == 'json':
            self.export_json(url)
        else:
            raise ValueError("Unsupported format: {}".format(format))
Save the SFrame to a file system for later use.

Parameters
----------
filename : string
    The location to save the SFrame. Either a local directory or a
    remote URL. If the format is 'binary', a directory will be created
    at the location which will contain the sframe.

format : {'binary', 'csv', 'json'}, optional
    Format in which to save the SFrame. Binary saved SFrames can be
    loaded much faster and without any format conversion losses. If not
    given, will try to infer the format from filename given. If file
    name ends with '.csv' or '.csv.gz', then save as 'csv' format,
    otherwise save as 'binary' format. See export_csv for more csv
    saving options.

See Also
--------
load_sframe, SFrame

Examples
--------
>>> # Save the sframe into binary format
>>> sf.save('data/training_data_sframe')

>>> # Save the sframe into csv format
>>> sf.save('data/training_data.csv', format='csv')
383,292
def add_identity_parser(subparsers, parent_parser):
    # NOTE: many help/description strings in this parser were lost in
    # extraction; the texts below are reconstructions that keep the
    # fragments still visible in the source.
    parser = subparsers.add_parser(
        'identity',
        help='Works with optional roles and policies',
        description='Provides subcommands to work with roles and policies.')
    identity_parsers = parser.add_subparsers(
        title="subcommands", dest="subcommand")
    identity_parsers.required = True

    policy_parser = identity_parsers.add_parser(
        'policy',
        help='Provides subcommands to display and create policies',
        description='Provides subcommands to list the current policies '
                    'and to create new policies.')
    policy_parsers = policy_parser.add_subparsers(
        title='subcommands', dest='policy_cmd')
    policy_parsers.required = True

    create_parser = policy_parsers.add_parser(
        'create',
        help='Creates a new policy',
        description='Creates a new policy that can be referenced by roles.')
    create_parser.add_argument(
        '-k', '--key', type=str,
        help='specify the signing key for the resulting batches')
    create_target_group = create_parser.add_mutually_exclusive_group()
    create_target_group.add_argument(
        '-o', '--output', type=str,
        help='specify the output filename for the resulting batches')
    create_target_group.add_argument(
        '--url', type=str,
        help="identify the URL of a validator's REST API",
        default='http://localhost:8008')
    create_parser.add_argument(
        '--wait', type=int, default=15,
        help='set the time, in seconds, to wait for the policy to commit')
    create_parser.add_argument(
        'name', type=str,
        help='name of the new policy')
    create_parser.add_argument(
        'rule', type=str, nargs='+',
        help='rule with the format "PERMIT_KEY <key>" or "DENY_KEY <key>" '
             '(multiple "rule" arguments can be specified)')

    list_parser = policy_parsers.add_parser(
        'list',
        help='Lists the current policies',
        description='Lists the policies that are currently set in state.')
    list_parser.add_argument(
        '--url', type=str,
        help="identify the URL of a validator's REST API",
        default='http://localhost:8008')
    list_parser.add_argument(
        '--format', default='default',
        choices=['default', 'csv', 'json', 'yaml'],
        help='choose the output format')

    role_parser = identity_parsers.add_parser(
        'role',
        help='Provides subcommands to display and create roles',
        description='Provides subcommands to list the current roles '
                    'and to create new roles.')
    role_parsers = role_parser.add_subparsers(
        title='subcommands', dest='role_cmd')
    role_parsers.required = True

    create_parser = role_parsers.add_parser(
        'create',
        help='Creates a new role',
        description='Creates a new role restricted to a given policy.')
    create_parser.add_argument(
        '-k', '--key', type=str,
        help='specify the signing key for the resulting batches')
    create_parser.add_argument(
        '--wait', type=int, default=15,
        help='set the time, in seconds, to wait for the role to commit')
    create_target_group = create_parser.add_mutually_exclusive_group()
    create_target_group.add_argument(
        '-o', '--output', type=str,
        help='specify the output filename for the resulting batches')
    create_target_group.add_argument(
        '--url', type=str,
        help="the URL of a validator's REST API",
        default='http://localhost:8008')
    create_parser.add_argument(
        'name', type=str,
        help='name of the role')
    create_parser.add_argument(
        'policy', type=str,
        help='identify policy that role will be restricted to')

    list_parser = role_parsers.add_parser(
        'list',
        help='Lists the current keys and values of roles',
        description='Displays the roles that are currently set in state.')
    list_parser.add_argument(
        '--url', type=str,
        help="identify the URL of a validator's REST API",
        default='http://localhost:8008')
    list_parser.add_argument(
        '--format', default='default',
        choices=['default', 'csv', 'json', 'yaml'],
        help='choose the output format')
Creates the arg parsers needed for the identity command and its subcommands.
383,293
def _isinstance(expr, classname):
    for cls in type(expr).__mro__:
        if cls.__name__ == classname:
            return True
    return False
Check whether `expr` is an instance of the class with name `classname`. This is like the builtin `isinstance`, but it takes the `classname` as a string instead of the class itself. Useful when we don't want to import the class we are checking for (also, remember that printers choose a rendering method based on the class name, so this is totally OK).
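A quick demonstration, assuming SymPy is available:

import sympy

x = sympy.Symbol('x')
assert _isinstance(x, 'Symbol')
assert _isinstance(x, 'Basic')       # matches anywhere in the MRO
assert not _isinstance(x, 'Matrix')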
383,294
def _watchdog_queue(self):
    # NOTE: the key binding and queue message below are reconstructions;
    # the original string literals were lost in extraction.
    while not self.quit:
        k = self.queue.get()
        if k == 'q':  # assumed quit key
            self.quit = True
            self.switch_queue.put('quit')
Pops characters off the queue and executes the corresponding commands.
383,295
def _SGraphFromJsonTree(json_str):
    g = json.loads(json_str)
    vertices = [_Vertex(x['id'],
                        dict([(str(k), v) for k, v in _six.iteritems(x)
                              if k != 'id']))
                for x in g['vertices']]
    edges = [_Edge(x['src'], x['dst'],
                   dict([(str(k), v) for k, v in _six.iteritems(x)
                         if k != 'src' and k != 'dst']))
             for x in g['edges']]
    sg = _SGraph().add_vertices(vertices)
    if len(edges) > 0:
        sg = sg.add_edges(edges)
    return sg
Convert the Json Tree to SGraph
383,296
def generate_payload(self, config=None, context=None):
    # NOTE: the log messages and fallback payload key are reconstructions;
    # the original string literals were lost in extraction.
    for name, plugin in iteritems(self._registered):
        if plugin.supports(config, context):
            logger.debug('Generating payload with plugin %s' % name)
            return plugin.generate_payload(config, context)
    logger.debug('No plugin supports the given config and context')
    return {
        'context': context
    }
Generate payload by iterating over registered plugins. The first plugin that supports the given config and context generates the payload; otherwise a payload containing only the context is returned.

:param context: current context.
:param config: honeybadger configuration.
:return: a dict with the generated payload.
383,297
def __make_id(receiver):
    if __is_bound_method(receiver):
        return (id(receiver.__func__), id(receiver.__self__))
    return id(receiver)
Generate an identifier for a callable signal receiver. This is used when disconnecting receivers, where we need to correctly establish equivalence between the input receiver and the receivers assigned to a signal. Args: receiver: A callable object. Returns: An identifier for the receiver.
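A short illustration of why the (function, instance) pair is needed for bound methods:

class Receiver:
    def on_signal(self):
        pass

r = Receiver()
# Each attribute access creates a fresh bound-method object, so plain id()
# cannot identify "the same" receiver across connect/disconnect calls...
m1, m2 = r.on_signal, r.on_signal
assert m1 is not m2
# ...but the (function, instance) id pair is stable:
assert (id(m1.__func__), id(m1.__self__)) == (id(m2.__func__), id(m2.__self__))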
383,298
def do_authorization(self, transactionid, amt):
    args = self._sanitize_locals(locals())
    return self._call('DoAuthorization', **args)
Shortcut for the DoAuthorization method. Use the TRANSACTIONID from DoExpressCheckoutPayment for the ``transactionid``. The latest version of the API does not support the creation of an Order from `DoDirectPayment`. The `amt` should be the same as passed to `DoExpressCheckoutPayment`.

Flow for a payment involving a `DoAuthorization` call::

    1. One or many calls to `SetExpressCheckout` with pertinent order details, returns `TOKEN`
    2. `DoExpressCheckoutPayment` with `TOKEN`, `PAYMENTACTION` set to Order, `AMT` set to the amount of the transaction, returns `TRANSACTIONID`
    3. `DoAuthorization` with `TRANSACTIONID` and `AMT` set to the amount of the transaction.
    4. `DoCapture` with the `AUTHORIZATIONID` (the `TRANSACTIONID` returned by `DoAuthorization`)
383,299
def __detect_os_identity_api_version(self):
    ver = os.getenv('OS_IDENTITY_API_VERSION', '')
    if ver == '3':
        log.debug(
            "Using OpenStack Identity API v3"
            " because of environmental variable setting `OS_IDENTITY_API_VERSION=3`")
        return '3'
    elif ver == '2' or ver.startswith('2.'):
        log.debug(
            "Using OpenStack Identity API v2"
            " because of environmental variable setting `OS_IDENTITY_API_VERSION=2`")
        return '2'
    elif self._os_auth_url.endswith('/v3'):
        log.debug(
            "Using OpenStack Identity API v3 because of `/v3` ending in auth URL;"
            " set environmental variable OS_IDENTITY_API_VERSION to force use of Identity API v2 instead.")
        return '3'
    elif self._os_auth_url.endswith('/v2.0'):
        log.debug(
            "Using OpenStack Identity API v2 because of `/v2.0` ending in auth URL;"
            " set environmental variable OS_IDENTITY_API_VERSION to force use of Identity API v3 instead.")
        return '2'
    else:
        return None
Return preferred OpenStack Identity API version (either one of the two strings ``'2'`` or ``'3'``) or ``None``. The following auto-detection strategies are tried (in this order): #. Read the environmental variable `OS_IDENTITY_API_VERSION` and check if its value is one of the two strings ``'2'`` or ``'3'``; #. Check if a version tag like ``/v3`` or ``/v2.0`` ends the OpenStack auth URL. If none of the above worked, return ``None``. For more information on ``OS_IDENTITY_API_VERSION``, please see `<https://docs.openstack.org/developer/python-openstackclient/authentication.html>`_.