Dataset columns: Unnamed: 0 (int64, values 0 to 389k); code (string, lengths 26 to 79.6k); docstring (string, lengths 1 to 46.9k).
6,700
def markdown(iterable, renderer=HTMLRenderer):
    with renderer() as renderer:
        return renderer.render(Document(iterable))
Output HTML with default settings. Enables inline and block-level HTML tags.
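A minimal usage sketch for the renderer helper above (a mistletoe-style API is assumed; HTMLRenderer and Document come from that library, not from this row):

lines = ['# Title\n', '\n', 'Some *emphasised* text.\n']
html = markdown(lines)   # renders with the default HTMLRenderer
# html is an HTML string along the lines of '<h1>Title</h1>\n<p>Some <em>emphasised</em> text.</p>\n'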
6,701
def push_account_task(obj_id):
    lock_id = "%s-push-account-%s" % (settings.ENV_PREFIX, obj_id)
    acquire_lock = lambda: cache.add(lock_id, "true", LOCK_EXPIRE)
    release_lock = lambda: cache.delete(lock_id)
    if acquire_lock():
        UserModel = get_user_model()
        try:
            upload_intercom_user(obj_id)
        except UserModel.DoesNotExist:
            release_lock()
        release_lock()
Async: push_account_task.delay(Account.id)
6,702
def get_first():
    client = po.connect()
    all_droplets = client.droplets.list()
    id = all_droplets[0][]
    return client.droplets.get(id)
Return the first droplet.
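The subscript key on the first droplet was lost in extraction; a hedged reconstruction, assuming the list call returns records with an 'id' field (an assumption), might read:

def get_first():
    client = po.connect()
    all_droplets = client.droplets.list()
    droplet_id = all_droplets[0]['id']      # assumed key name
    return client.droplets.get(droplet_id)  # return the first droplet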
6,703
def _get_hosted_zone_limit(self, limit_type, hosted_zone_id):
    result = self.conn.get_hosted_zone_limit(
        Type=limit_type, HostedZoneId=hosted_zone_id
    )
    return result
Return a hosted zone limit [recordsets|vpc_associations] :rtype: dict
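A hedged usage sketch; it assumes limit_type carries the raw Route 53 limit-type strings accepted by GetHostedZoneLimit, and the zone ID is a placeholder:

limit = checker._get_hosted_zone_limit('MAX_RRSETS_BY_ZONE', 'Z1D633PJN98FT9')
# boto3's get_hosted_zone_limit returns a dict like
# {'Limit': {'Type': 'MAX_RRSETS_BY_ZONE', 'Value': 10000}, 'Count': 42}
rrset_quota = limit['Limit']['Value']
# 'MAX_VPCS_ASSOCIATED_BY_ZONE' is the corresponding type for VPC associations.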
6,704
def fetch_token(self, client_secret, code, context, scope, redirect_uri, token_url=):
    res = self.post(token_url,
                    {: self.client_id, : client_secret, : code, : context,
                     : scope, : , : redirect_uri},
                    headers={: })
    self._session.headers.update(self._oauth_headers(self.client_id, res[]))
    return res
Fetches a token from given token_url, using given parameters, and sets up session headers for future requests. redirect_uri should be the same as your callback URL. code, context, and scope should be passed as parameters to your callback URL on app installation. Raises HttpException on failure (same as Connection methods).
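A hedged usage sketch of the token exchange described above; the values are placeholders pulled from a hypothetical callback request:

token = conn.fetch_token(
    client_secret='<app client secret>',
    code=request.args['code'],         # passed to the callback URL on install
    context=request.args['context'],
    scope=request.args['scope'],
    redirect_uri='https://example.com/auth/callback',  # must match the registered callback
)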
6,705
def search(self, fields=None, query=None, filters=None):
    results = self.search_json(fields, query)[]
    results = self.search_normalize(results)
    entities = [
        type(self)(self._server_config, **result)
        for result in results
    ]
    if filters is not None:
        entities = self.search_filter(entities, filters)
    return entities
Search for entities. At its simplest, this method searches for all entities of a given kind. For example, to ask for all :class:`nailgun.entities.LifecycleEnvironment` entities:: LifecycleEnvironment().search() Values on an entity are used to generate a search query, and the ``fields`` argument can be used to specify which fields should be used when generating a search query:: lc_env = LifecycleEnvironment(name='foo', organization=1) results = lc_env.search() # Search by name and organization. results = lc_env.search({'name', 'organization'}) # Same. results = lc_env.search({'name'}) # Search by name. results = lc_env.search({'organization'}) # Search by organization results = lc_env.search(set()) # Search for all lifecycle envs. results = lc_env.search({'library'}) # Error! In some cases, the simple search queries that can be generated by NailGun are not sufficient. In this case, you can pass in a raw search query instead. For example, to search for all lifecycle environments with a name of 'foo':: LifecycleEnvironment().search(query={'search': 'name="foo"'}) The example above is rather pointless: it is easier and more concise to use a generated query. But — and this is a **very** important "but" — the manual search query is melded in to the generated query. This can be used to great effect:: LifecycleEnvironment(name='foo').search(query={'per_page': 50}) For examples of what the final search queries look like, see :meth:`search_payload`. (That method also accepts the ``fields`` and ``query`` arguments.) In some cases, the server's search facilities may be insufficient, or it may be inordinately difficult to craft a search query. In this case, you can filter search results locally. For example, to ask the server for a list of all lifecycle environments and then locally search through the results for the lifecycle environment named "foo":: LifecycleEnvironment().search(filters={'name': 'foo'}) Be warned that filtering locally can be **very** slow. NailGun must ``read()`` every single entity returned by the server before filtering results. This is because the values used in the filtering process may not have been returned by the server in the initial response to the search. The fact that all entities are read when ``filters`` is specified can be used to great effect. For example, this search returns a fully populated list of every single lifecycle environment:: LifecycleEnvironment().search(filters={}) :param fields: A set naming which fields should be used when generating a search query. If ``None``, all values on the entity are used. If an empty set, no values are used. :param query: A dict containing a raw search query. This is melded in to the generated search query like so: ``{generated: query}.update({manual: query})``. :param filters: A dict. Used to filter search results locally. :return: A list of entities, all of type ``type(self)``.
6,706
def get_columns(self, font):
    font = self.get_font(font)
    return self.fonts[six.text_type(font)][]
Return the number of columns for the given font.
6,707
def fit(self, X, y, step_size=0.1, init_weights=None, warm_start: bool=False): assert len(np.shape(X)) == 2, .format(len(np.shape(X))) assert np.shape(X)[0] > 1, \ .format(np.shape(X)[0]) assert np.shape(X)[1] == len(y), ( .format(np.shape(X)[1], len(y))) if warm_start: assert self._weights is not None, weights = self._weights elif init_weights is None: weights = np.array([1.0] * len(X)) else: assert (len(init_weights) == np.shape(X)[0]), ( .format(np.shape(X)[0], len(init_weights))) weights = init_weights def __is_better_score(score_to_test, score): return score_to_test > score if self.maximize else not score_to_test > score score = 0 best_score = self.maximize - 0.5 while __is_better_score(best_score, score): best_score = self.metric(y, np.average(np.power(X, self._power), weights=weights, axis=0) ** ( 1.0 / self._power)) score = best_score best_index, best_step = -1, 0.0 for j in range(len(X)): delta = np.array([(0 if k != j else step_size) for k in range(len(X))]) s = self.metric(y, np.average(np.power(X, self._power), weights=weights + delta, axis=0) ** ( 1.0 / self._power)) if __is_better_score(s, best_score): best_index, best_score, best_step = j, s, step_size continue if weights[j] - step_size >= 0: s = self.metric(y, np.average(np.power(X, self._power), weights=weights - delta, axis=0) ** ( 1.0 / self._power)) if s > best_score: best_index, best_score, best_step = j, s, -step_size if __is_better_score(best_score, score): weights[best_index] += best_step self._weights = weights self._score = best_score return self
Fit the weights on the given predictions. Args: X (array-like): Predictions of different models for the labels. y (array-like): Labels. step_size (float): Step size for optimizing the weights. Smaller step sizes most likely improve resulting score but increases training time. init_weights (array-like): Initial weights for training. When `warm_start` is used `init_weights` are ignored. warm_start (bool): Continues training. Will only work when `fit` has been called with this object earlier. When `warm_start` is used `init_weights` are ignored. Returns: self
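A hedged usage sketch of the fit() API described above (the class name is hypothetical; the snippet only illustrates the expected shapes of X and y):

import numpy as np

X = np.array([[0.2, 0.8, 0.6],    # predictions of model 1 for three samples
              [0.3, 0.7, 0.5]])   # predictions of model 2 for the same samples
y = np.array([0.25, 0.75, 0.55])  # true labels

blender = Blender()                  # hypothetical object exposing the fit() above
blender.fit(X, y, step_size=0.05)
blender.fit(X, y, warm_start=True)   # continue training from the learned weights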
6,708
def watch_files(self): try: while 1: sleep(1) try: files_stat = self.get_files_stat() except SystemExit: logger.error("Error occurred, server shut down") self.shutdown_server() if self.files_stat != files_stat: logger.info("Changes detected, start rebuilding..") try: generator.re_generate() global _root _root = generator.root except SystemExit: logger.error("Error occurred, server shut down") self.shutdown_server() self.files_stat = files_stat logger.info("^C received, shutting down watcher") self.shutdown_watcher()
Watch files for changes; if changed, rebuild the blog. This thread will quit if the main process ends.
6,709
def vertex_normals(self):
    assert hasattr(self.faces_sparse, )
    vertex_normals = geometry.mean_vertex_normals(
        vertex_count=len(self.vertices),
        faces=self.faces,
        face_normals=self.face_normals,
        sparse=self.faces_sparse)
    return vertex_normals
The vertex normals of the mesh. If the normals were loaded we check to make sure we have the same number of vertex normals and vertices before returning them. If there are no vertex normals defined or a shape mismatch we calculate the vertex normals from the mean normals of the faces the vertex is used in. Returns ---------- vertex_normals : (n,3) float Represents the surface normal at each vertex. Where n == len(self.vertices)
6,710
def str_rstrip(x, to_strip=None):
    sl = _to_string_sequence(x).rstrip( if to_strip is None else to_strip) if to_strip != else x
    return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
Remove trailing characters from a string sample. :param str to_strip: The string to be removed :returns: an expression containing the modified string column. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.rstrip(to_strip='ing') Expression = str_rstrip(text, to_strip='ing') Length: 5 dtype: str (expression) --------------------------------- 0 Someth 1 very pretty 2 is com 3 our 4 way.
6,711
def create_cookie(self, delete=None): value = if delete else self._serialize(self.data) split_url = parse.urlsplit(self.adapter.url) domain = split_url.netloc.split()[0] if not in domain: template = else: template = ( ) return template.format( name=self.name, value=value, domain=domain, path=split_url.path, secure= if self.secure else , expires= if delete else )
Creates the value for ``Set-Cookie`` HTTP header. :param bool delete: If ``True`` the cookie value will be ``deleted`` and the Expires value will be ``Thu, 01-Jan-1970 00:00:01 GMT``.
6,712
def get_shark_field(self, fields):
    out = super(BACK, self).get_shark_field(fields)
    out.update({: self.acked_seqs, : self.bitmap_str})
    return out
:fields: str[]
6,713
def get_config(self):
    self.update_network_description()
    result = dict(self.__dict__)
    result[] = None
    result[] = None
    result[] = None
    result[] = None
    return result
serialize to a dict all attributes except model weights Returns ------- dict
6,714
def _get_channel(self):
    channel = self._transport.open_session()
    channel.set_combine_stderr(True)
    channel.get_pty()
    return channel
Returns a channel, according to whether there is a redirection to do or not.
6,715
def build_trips(pfeed, routes, service_by_window): routes = pd.merge(routes[[, ]], pfeed.frequencies) routes = pd.merge(routes, pfeed.service_windows) rows = [] for index, row in routes.iterrows(): shape = row[] route = row[] window = row[] start, end = row[[, ]].values duration = get_duration(start, end, ) frequency = row[] if not frequency: continue
Given a ProtoFeed and its corresponding routes (DataFrame), service-by-window (dictionary), return a DataFrame representing ``trips.txt``. Trip IDs encode route, direction, and service window information to make it easy to compute stop times later.
6,716
def _parse_launch_error(data):
    return LaunchFailure(
        data.get(ERROR_REASON, None),
        data.get(APP_ID),
        data.get(REQUEST_ID),
    )
Parses a LAUNCH_ERROR message and returns a LaunchFailure object. :type data: dict :rtype: LaunchFailure
6,717
def add_sparse_covariance_matrix(self,x,y,names,iidx,jidx,data): if not isinstance(x, np.ndarray): x = np.array(x) if not isinstance(y, np.ndarray): y = np.array(y) assert x.shape[0] == y.shape[0] assert x.shape[0] == len(names) for i,name in enumerate(names): iidx.append(i) jidx.append(i) data.append(self.contribution) for i1, (n1, x1, y1) in enumerate(zip(names, x, y)): dx = x1 - x[i1 + 1:] dy = y1 - y[i1 + 1:] dxx, dyy = self._apply_rotation(dx, dy) h = np.sqrt(dxx * dxx + dyy * dyy) h[h < 0.0] = 0.0 cv = self._h_function(h) if np.any(np.isnan(cv)): raise Exception("nans in cv for i1 {0}".format(i1)) j = list(np.arange(i1+1,x.shape[0])) i = [i1] * len(j) iidx.extend(i) jidx.extend(j) data.extend(list(cv)) iidx.extend(j) jidx.extend(i) data.extend(list(cv))
build a pyemu.SparseMatrix instance implied by Vario2d Parameters ---------- x : (iterable of floats) x-coordinate locations y : (iterable of floats) y-coordinate locations names : (iterable of str) names of locations. If None, cov must not be None iidx : 1-D ndarray i row indices jidx : 1-D ndarray j col indices data : 1-D ndarray nonzero entries Returns ------- None
6,718
def _readBlock(self): if self.interrupted or self.fp is None: if self.debug: log.msg() return True length = self.blocksize if self.bytes_remaining is not None and length > self.bytes_remaining: length = self.bytes_remaining if length <= 0: if self.stderr is None: self.stderr = "Maximum filesize reached, truncating file ".format( self.path) self.rc = 1 return True else: d = self.reader.callRemote(, length) d.addCallback(self._writeData) return d
Read a block of data from the remote reader.
6,719
def datasetsBM(host=biomart_host):
    stdout_ = sys.stdout
    stream = StringIO()
    sys.stdout = stream
    server = BiomartServer(biomart_host)
    server.show_datasets()
    sys.stdout = stdout_
    variable = stream.getvalue()
    v = variable.replace("{", " ")
    v = v.replace("}", " ")
    v = v.replace(": ", "\t")
    print(v)
Lists BioMart datasets. :param host: address of the host server, default='http://www.ensembl.org/biomart' :returns: nothing
6,720
def generate_unit_squares(image_width, image_height):
    for x in range(image_width):
        for y in range(image_height):
            yield [(x, y), (x + 1, y), (x + 1, y + 1), (x, y + 1)]
Generate coordinates for a tiling of unit squares.
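For example, a 2x1 tiling yields two squares, each listed corner by corner:

list(generate_unit_squares(2, 1))
# [[(0, 0), (1, 0), (1, 1), (0, 1)],
#  [(1, 0), (2, 0), (2, 1), (1, 1)]]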
6,721
def _get_baremetal_connections(self, port, only_active_switch=False, from_segment=False): connections = [] is_native = False if self.trunk.is_trunk_subport(port) else True all_link_info = port[bc.portbindings.PROFILE][] for link_info in all_link_info: intf_type, port = nexus_help.split_interface_name( link_info[]) switch_info = self._get_baremetal_switch_info( link_info) if not switch_info: continue switch_ip = switch_info[] if not self._switch_defined(switch_ip): continue if (only_active_switch and not self.is_switch_active(switch_ip)): continue ch_grp = 0 if not from_segment: try: reserved = nxos_db.get_switch_if_host_mappings( switch_ip, nexus_help.format_interface_name( intf_type, port)) if reserved[0].ch_grp > 0: ch_grp = reserved[0].ch_grp intf_type, port = nexus_help.split_interface_name( , ch_grp) except excep.NexusHostMappingNotFound: pass connections.append((switch_ip, intf_type, port, is_native, ch_grp)) return connections
Get switch ips and interfaces from baremetal transaction. This method is used to extract switch/interface information from transactions where VNIC_TYPE is baremetal. :param port: Received port transaction :param only_active_switch: Indicator for selecting connections with switches that are active :param from_segment: only return interfaces from the segment/transaction as opposed to say port channels which are learned. :Returns: list of switch_ip, intf_type, port_id, is_native
6,722
def vmomentsurfacemass(self,R,n,m,t=0.,nsigma=None,deg=False, epsrel=1.e-02,epsabs=1.e-05,phi=0., grid=None,gridpoints=101,returnGrid=False, hierarchgrid=False,nlevels=2, print_progress=False, integrate_method=, deriv=None): if not grid is None and isinstance(grid,evolveddiskdfGrid): if returnGrid: return (self._vmomentsurfacemassGrid(n,m,grid),grid) else: return self._vmomentsurfacemassGrid(n,m,grid) elif not grid is None \ and isinstance(grid,evolveddiskdfHierarchicalGrid): if returnGrid: return (self._vmomentsurfacemassHierarchicalGrid(n,m,grid), grid) else: return self._vmomentsurfacemassHierarchicalGrid(n,m,grid) if deg: az= phi*_DEGTORAD else: az= phi if nsigma is None: nsigma= _NSIGMA if _PROFILE: start= time_module.time() if hasattr(self._initdf,) \ and hasattr(self._initdf,) \ and hasattr(self._initdf,) \ and hasattr(self._initdf,): sigmaR1= nu.sqrt(self._initdf._estimateSigmaR2(R,phi=az)) sigmaT1= nu.sqrt(self._initdf._estimateSigmaT2(R,phi=az)) meanvR= self._initdf._estimatemeanvR(R,phi=az) meanvT= self._initdf._estimatemeanvT(R,phi=az) else: warnings.warn("No etc. functions found for initdf in evolveddf; thus using potentially slow sigmaR2 etc functions", galpyWarning) sigmaR1= nu.sqrt(self._initdf.sigmaR2(R,phi=az,use_physical=False)) sigmaT1= nu.sqrt(self._initdf.sigmaT2(R,phi=az,use_physical=False)) meanvR= self._initdf.meanvR(R,phi=az,use_physical=False) meanvT= self._initdf.meanvT(R,phi=az,use_physical=False) if _PROFILE: setup_time= (time_module.time()-start) if not grid is None and isinstance(grid,bool) and grid: if not hierarchgrid: if _PROFILE: start= time_module.time() grido= self._buildvgrid(R,az,nsigma,t, sigmaR1,sigmaT1,meanvR,meanvT, gridpoints,print_progress, integrate_method,deriv) if _PROFILE: grid_time= (time_module.time()-start) print(setup_time/(setup_time+grid_time), \ grid_time/(setup_time+grid_time), \ setup_time+grid_time) if returnGrid: return (self._vmomentsurfacemassGrid(n,m,grido),grido) else: return self._vmomentsurfacemassGrid(n,m,grido) else: grido= evolveddiskdfHierarchicalGrid(self,R,az,nsigma,t, sigmaR1,sigmaT1,meanvR, meanvT, gridpoints,nlevels,deriv, print_progress=print_progress) if returnGrid: return (self._vmomentsurfacemassHierarchicalGrid(n,m, grido), grido) else: return self._vmomentsurfacemassHierarchicalGrid(n,m,grido) initvmoment= self._initdf.vmomentsurfacemass(R,n,m,nsigma=nsigma, phi=phi) if initvmoment == 0.: initvmoment= 1. norm= sigmaR1**(n+1)*sigmaT1**(m+1)*initvmoment if isinstance(t,(list,nu.ndarray)): raise IOError("list of times is only supported with grid-based calculation") return dblquad(_vmomentsurfaceIntegrand, meanvT/sigmaT1-nsigma, meanvT/sigmaT1+nsigma, lambda x: meanvR/sigmaR1 -nu.sqrt(nsigma**2.-(x-meanvT/sigmaT1)**2.), lambda x: meanvR/sigmaR1 +nu.sqrt(nsigma**2.-(x-meanvT/sigmaT1)**2.), (R,az,self,n,m,sigmaR1,sigmaT1,t,initvmoment), epsrel=epsrel,epsabs=epsabs)[0]*norm
NAME: vmomentsurfacemass PURPOSE: calculate the an arbitrary moment of the velocity distribution at (R,phi) times the surfacmass INPUT: R - radius at which to calculate the moment (in natural units) phi= azimuth (rad unless deg=True) n - vR^n m - vT^m t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous, but not too generous) deg= azimuth is in degree (default=False) epsrel, epsabs - scipy.integrate keywords (the integration calculates the ratio of this vmoment to that of the initial DF) grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid; if this was created for a list of times, moments are calculated for each time gridpoints= number of points to use for the grid in 1D (default=101) returnGrid= if True, return the grid object (default=False) hierarchgrid= if True, use a hierarchical grid (default=False) nlevels= number of hierarchical levels for the hierarchical grid print_progress= if True, print progress updates integrate_method= orbit.integrate method argument deriv= None, 'R', or 'phi': calculates derivative of the moment wrt R or phi **onnly with grid options** OUTPUT: <vR^n vT^m x surface-mass> at R,phi (no support for units) COMMENT: grid-based calculation is the only one that is heavily tested (although the test suite also tests the direct calculation) HISTORY: 2011-03-30 - Written - Bovy (NYU)
6,723
def get_arguments(self): MetricCommon.get_arguments(self) if self.args.metricName is not None: self.metricName = self.args.metricName if self.args.displayName is not None: self.displayName = self.args.displayName if self.args.displayNameShort is not None: self.displayNameShort = self.args.displayNameShort if self.args.description is not None: self.description = self.args.description if self.args.aggregate is not None: self.aggregate = self.args.aggregate if self.args.unit is not None: self.unit = self.args.unit if self.args.resolution is not None: self.resolution = self.args.resolution if self.args.isDisabled is not None: self.isDisabled = self.args.isDisabled if self.args.type is not None: self.type = self.args.type data = {} if self.metricName is not None: data[] = self.metricName if self.displayName is not None: data[] = self.displayName if self.displayNameShort is not None: data[] = self.displayNameShort if self.description is not None: data[] = self.description if self.aggregate is not None: data[] = self.aggregate if self.unit is not None: data[] = self.unit if self.resolution is not None: data[] = self.resolution if self.isDisabled is not None: data[] = True if self.isDisabled == else False if self.type is not None: data[] = self.type self.path = "v1/metrics/{0}".format(self.metricName) self.data = json.dumps(data, sort_keys=True) self.headers = {: , "Accept": "application/json"}
Extracts the specific arguments of this CLI
6,724
def iter(self, order=, sort=True):
    from casacore.tables import tableiter
    return tableiter(self._table, [self._column], order, sort)
Return a :class:`tableiter` object on this column.
6,725
def content(self):
    content = self._get_content()
    if bool(content and in content):
        return content
    return safe_unicode(content)
Returns lazily content of the FileNode. If possible, would try to decode content from UTF-8.
6,726
def subscribe(self, objectID, varIDs=(tc.VAR_ROAD_ID, tc.VAR_LANEPOSITION),
              begin=0, end=2**31 - 1):
    Domain.subscribe(self, objectID, varIDs, begin, end)
subscribe(string, list(integer), int, int) -> None Subscribe to one or more object values for the given interval.
6,727
def apply_trend_constraint(self, limit, dt, distribution_skip=False, **kwargs):
    if type(limit) != Quantity:
        limit = limit * u.m/u.s
    if type(dt) != Quantity:
        dt = dt * u.day
    dRVs = np.absolute(self.dRV(dt))
    c1 = UpperLimit(dRVs, limit)
    c2 = LowerLimit(self.Plong, dt*4)
    self.apply_constraint(JointConstraintOr(c1, c2, name=, Ps=self.Plong, dRVs=dRVs),
                          distribution_skip=distribution_skip, **kwargs)
Constrains change in RV to be less than limit over time dt. Only works if ``dRV`` and ``Plong`` attributes are defined for population. :param limit: Radial velocity limit on trend. Must be :class:`astropy.units.Quantity` object, or else interpreted as m/s. :param dt: Time baseline of RV observations. Must be :class:`astropy.units.Quantity` object; else interpreted as days. :param distribution_skip: This is by default ``True``. *To be honest, I'm not exactly sure why. Might be important, might not (don't remember).* :param **kwargs: Additional keyword arguments passed to :func:`StarPopulation.apply_constraint`.
6,728
def _set_get_media_detail(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=get_media_detail.get_media_detail, is_leaf=True, yang_name="get-media-detail", rest_name="get-media-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "rpc", : , }) self.__get_media_detail = t if hasattr(self, ): self._set()
Setter method for get_media_detail, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_get_media_detail is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_get_media_detail() directly. YANG Description: This is a function that serves to return the media properties of all the interfaces of the managed entity.
6,729
def read_file(file_path_name):
    with io.open(os.path.join(os.path.dirname(__file__), file_path_name),
                 mode=, encoding=) as fd:
        return fd.read()
Read the content of the specified file. @param file_path_name: path and name of the file to read. @return: content of the specified file.
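The mode and encoding arguments were lost from this row; a self-contained sketch assuming text mode and UTF-8 (both assumptions, not taken from the source):

import io
import os

def read_file(file_path_name):
    # Sketch only: 'rt' and 'utf-8' are assumed, not confirmed by the original.
    path = os.path.join(os.path.dirname(__file__), file_path_name)
    with io.open(path, mode='rt', encoding='utf-8') as fd:
        return fd.read()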
6,730
def upload(client, source_dir): print() print() print() listings_folder = os.path.join(source_dir, ) langfolders = filter(os.path.isdir, list_dir_abspath(listings_folder)) for language_dir in langfolders: language = os.path.basename(language_dir) with open(os.path.join(language_dir, )) as listings_file: listing = json.load(listings_file) listing_response = client.update( , language=language, body=listing) print( % listing_response[])
Upload listing files in the source_dir folder hierarchy.
6,731
def _generateForTokenSecurity(self, username, password, tokenUrl, expiration=None, client=): query_dict = {: username, : password, :str(_defaultTokenExpiration), : client, : } if client == "referer": query_dict[] = self._referer_url if expiration is not None: query_dict[] = expiration secHandler = None if self.cookiejar is not None: secHandler = self if secHandler is not None: secHandler._method = "HANDLER" token = self._post(url=tokenUrl, param_dict=query_dict, securityHandler=secHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url) if self.cookiejar is not None: if secHandler is not None: secHandler._method = "TOKEN" if in token: self._token = None self._token_created_on = None self._token_expires_on = None self._expires_in = None return token elif in token: self._token = None self._token_created_on = None self._token_expires_on = None self._expires_in = None return token else: self._token = token[] self._token_created_on = datetime.datetime.now() self._token_expires_on = datetime.datetime.fromtimestamp(token[] /1000) - \ datetime.timedelta(seconds=1) self._expires_in = (self._token_expires_on - self._token_created_on).total_seconds() return token[]
generates a token for a feature service
6,732
def _fluent_params(self, fluents, ordering) -> FluentParamsList: variables = [] for fluent_id in ordering: fluent = fluents[fluent_id] param_types = fluent.param_types objects = () names = [] if param_types is None: names = [fluent.name] else: objects = tuple(self.object_table[ptype][] for ptype in param_types) for values in itertools.product(*objects): values = .join(values) var_name = .format(fluent.name, values) names.append(var_name) variables.append((fluent_id, names)) return tuple(variables)
Returns the instantiated `fluents` for the given `ordering`. For each fluent in `fluents`, it instantiates each parameter type w.r.t. the contents of the object table. Returns: Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name and a list of instantiated fluents represented as strings.
6,733
def hide(self):
    self.tk.withdraw()
    self._visible = False
    if self._modal:
        self.tk.grab_release()
Hide the window.
6,734
def data(self, index, role=Qt.DisplayRole): if not index.isValid(): return None def convertValue(row, col, columnDtype): value = None if columnDtype == object: value = self._dataFrame.ix[row, col] elif columnDtype in self._floatDtypes: value = round(float(self._dataFrame.ix[row, col]), self._float_precisions[str(columnDtype)]) elif columnDtype in self._intDtypes: value = int(self._dataFrame.ix[row, col]) elif columnDtype in self._boolDtypes: value = bool(self._dataFrame.ix[row, col]) elif columnDtype in self._dateDtypes: value = pandas.Timestamp(self._dataFrame.ix[row, col]) value = QtCore.QDateTime.fromString(str(value), self.timestampFormat) return value row = self._dataFrame.index[index.row()] col = self._dataFrame.columns[index.column()] columnDtype = self._dataFrame[col].dtype if role == Qt.DisplayRole: if columnDtype == numpy.bool: result = self._dataFrame.ix[row, col] else: result = convertValue(row, col, columnDtype) elif role == Qt.EditRole: result = convertValue(row, col, columnDtype) elif role == Qt.CheckStateRole: if columnDtype == numpy.bool_: if convertValue(row, col, columnDtype): result = Qt.Checked else: result = Qt.Unchecked else: result = None elif role == DATAFRAME_ROLE: result = self._dataFrame.ix[row, col] else: result = None return result
return data depending on index, Qt::ItemDataRole and data type of the column. Args: index (QtCore.QModelIndex): Index to define column and row you want to return role (Qt::ItemDataRole): Define which data you want to return. Returns: None if index is invalid None if role is none of: DisplayRole, EditRole, CheckStateRole, DATAFRAME_ROLE if role DisplayRole: unmodified _dataFrame value if column dtype is object (string or unicode). _dataFrame value as int or long if column dtype is in _intDtypes. _dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision). None if column dtype is in _boolDtypes. QDateTime if column dtype is numpy.timestamp64[ns]. Uses timestampFormat as conversion template. if role EditRole: unmodified _dataFrame value if column dtype is object (string or unicode). _dataFrame value as int or long if column dtype is in _intDtypes. _dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision). _dataFrame value as bool if column dtype is in _boolDtypes. QDateTime if column dtype is numpy.timestamp64[ns]. Uses timestampFormat as conversion template. if role CheckStateRole: Qt.Checked or Qt.Unchecked if dtype is numpy.bool_ otherwise None for all other dtypes. if role DATAFRAME_ROLE: unmodified _dataFrame value. raises TypeError if an unhandled dtype is found in column.
6,735
def orchestrate_high(data, test=None, queue=False, pillar=None, **kwargs): { stage_one: {salt.state: [{tgt: "db*"}, {sls: postgres_setup}]}, stage_two: {salt.state: [{tgt: "web*"}, {sls: apache_setup}, { require: [{salt: stage_one}], }]}, } if pillar is not None and not isinstance(pillar, dict): raise SaltInvocationError( ) __opts__[] = minion = salt.minion.MasterMinion(__opts__) running = minion.functions[]( data, test=None, queue=False, pillar=pillar, **kwargs) ret = {minion.opts[]: running} __jid_event__.fire_event({: ret, : }, ) return ret
Execute a single state orchestration routine .. versionadded:: 2015.5.0 CLI Example: .. code-block:: bash salt-run state.orchestrate_high '{ stage_one: {salt.state: [{tgt: "db*"}, {sls: postgres_setup}]}, stage_two: {salt.state: [{tgt: "web*"}, {sls: apache_setup}, { require: [{salt: stage_one}], }]}, }'
6,736
def to_dict(self, *, include_keys=None, exclude_keys=None, use_default_excludes=True):
    data = self.__dict__
    if include_keys:
        return pick(data, include_keys, transform=self._other_to_dict)
    else:
        skeys = self.exclude_keys_serialize if use_default_excludes else None
        ekeys = exclude_keys
        return exclude(
            data,
            lambda k: (skeys is not None and k in skeys) or (ekeys is not None and k in ekeys),
            transform=self._other_to_dict)
Converts the class to a dictionary. :include_keys: if not None, only the attrs given will be included. :exclude_keys: if not None, all attrs except those listed will be included, with respect to use_default_excludes. :use_default_excludes: if True, then the class-level exclude_keys_serialize will be combined with exclude_keys if given, or used in place of exclude_keys if not given.
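A hedged usage sketch (the User class and its exclude_keys_serialize = {'password'} are invented for illustration):

user = User(name='ada', email='ada@example.org', password='secret')

user.to_dict(include_keys={'name'})       # {'name': 'ada'}
user.to_dict(exclude_keys={'email'})      # {'name': 'ada'} ('password' dropped by the default excludes)
user.to_dict(exclude_keys={'email'},
             use_default_excludes=False)  # {'name': 'ada', 'password': 'secret'}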
6,737
def compute(cls, observation, prediction, key=None):
    assert isinstance(observation, (dict, float, int, pq.Quantity))
    assert isinstance(prediction, (dict, float, int, pq.Quantity))
    obs, pred = cls.extract_means_or_values(observation, prediction, key=key)
    value = pred / obs
    value = utils.assert_dimensionless(value)
    return RatioScore(value)
Compute a ratio from an observation and a prediction.
6,738
def _projection_to_paths(cls, root_key, projection): if in projection: return True inclusive = True sub_projection = {} for key, value in projection.items(): if key in [, ]: continue if key.startswith(): sub_projection[root_key] = {key: value} inclusive = False continue sub_key = root_key + + key if isinstance(value, dict): sub_value = cls._projection_to_paths(sub_key, value) if isinstance(sub_value, dict): sub_projection.update(sub_value) else: sub_projection[sub_key] = True else: sub_projection[sub_key] = True inclusive = False if inclusive: return True return sub_projection
Expand a $sub/$sub. projection to a single projection of True (if inclusive) or a map of full paths (e.g `employee.company.tel`).
6,739
def _process_priv_part(perms): _tmp = {} previous = None for perm in perms: if previous is None: _tmp[_PRIVILEGES_MAP[perm]] = False previous = _PRIVILEGES_MAP[perm] else: if perm == : _tmp[previous] = True else: _tmp[_PRIVILEGES_MAP[perm]] = False previous = _PRIVILEGES_MAP[perm] return _tmp
Process part
6,740
def get_lib_volume_mounts(base_lib_name, assembled_specs): volumes = [_get_lib_repo_volume_mount(assembled_specs[][base_lib_name])] volumes.append(get_command_files_volume_mount(base_lib_name, test=True)) for lib_name in assembled_specs[][base_lib_name][][]: lib_spec = assembled_specs[][lib_name] volumes.append(_get_lib_repo_volume_mount(lib_spec)) return volumes
Returns a list of the formatted volume specs for a lib
6,741
def getcomments(object): try: lines, lnum = findsource(object) except IOError: return None if ismodule(object): start = 0 if lines and lines[0][:2] == : start = 1 while start < len(lines) and string.strip(lines[start]) in [, ]: start = start + 1 if start < len(lines) and lines[start][:1] == : comments = [] end = start while end < len(lines) and lines[end][:1] == : comments.append(string.expandtabs(lines[end])) end = end + 1 return string.join(comments, ) elif lnum > 0: indent = indentsize(lines[lnum]) end = lnum - 1 if end >= 0 and string.lstrip(lines[end])[:1] == and \ indentsize(lines[end]) == indent: comments = [string.lstrip(string.expandtabs(lines[end]))] if end > 0: end = end - 1 comment = string.lstrip(string.expandtabs(lines[end])) while comment[:1] == and indentsize(lines[end]) == indent: comments[:0] = [comment] end = end - 1 if end < 0: break comment = string.lstrip(string.expandtabs(lines[end])) while comments and string.strip(comments[0]) == : comments[:1] = [] while comments and string.strip(comments[-1]) == : comments[-1:] = [] return string.join(comments, )
Get lines of comments immediately preceding an object's source code.
6,742
def on_train_begin(self, **kwargs): "Create the optimizers for the generator and critic if necessary, initialize smootheners." if not getattr(self,,None): self.opt_gen = self.opt.new([nn.Sequential(*flatten_model(self.generator))]) else: self.opt_gen.lr,self.opt_gen.wd = self.opt.lr,self.opt.wd if not getattr(self,,None): self.opt_critic = self.opt.new([nn.Sequential(*flatten_model(self.critic))]) else: self.opt_critic.lr,self.opt_critic.wd = self.opt.lr,self.opt.wd self.gen_mode = self.gen_first self.switch(self.gen_mode) self.closses,self.glosses = [],[] self.smoothenerG,self.smoothenerC = SmoothenValue(self.beta),SmoothenValue(self.beta) self.recorder.add_metric_names([, ]) self.imgs,self.titles = [],[]
Create the optimizers for the generator and critic if necessary, initialize smootheners.
6,743
def is_downloaded(self, file_path):
    if os.path.exists(file_path):
        self.chatbot.logger.info()
        return True
    return False
Check if the data file is already downloaded.
6,744
def genlet(generator_function=None, prime=True):
    if generator_function is None:
        return GeneratorLink.wraplet(prime=prime)
    elif not callable(generator_function):
        return GeneratorLink.wraplet(prime=generator_function)
    return GeneratorLink.wraplet(prime=prime)(generator_function)
Decorator to convert a generator function to a :py:class:`~chainlink.ChainLink` :param generator_function: the generator function to convert :type generator_function: generator :param prime: advance the generator to the next/first yield :type prime: bool When used as a decorator, this function can also be called with and without keywords. .. code:: python @genlet def pingpong(): "Chainlet that passes on its value" last = yield while True: last = yield last @genlet(prime=True) def produce(): "Chainlet that produces a value" while True: yield time.time() @genlet(True) def read(iterable): "Chainlet that reads from an iterable" for item in iterable: yield item
6,745
def add(self, item, position=5):
    if item in self.items:
        return
    self.items[item] = position
    self._add_dep(item)
    self.order = None
    self.changed(code_changed=True)
Add an item to the list unless it is already present. If the item is an expression, then a semicolon will be appended to it in the final compiled code.
6,746
def set_jinja2_silent_none(config):
    config.commit()
    jinja2_env = config.get_jinja2_environment()
    jinja2_env.finalize = _silent_none
If the variable is None, print '' instead of 'None'.
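The _silent_none finalize hook is not shown in this row; a typical implementation (an assumption, not taken from the source) is simply:

def _silent_none(value):
    # Render None as an empty string instead of the literal text 'None'.
    return '' if value is None else value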
6,747
def _parse_ignores(self): error_message = ( colorama.Fore.RED + "{} does not appear to be a valid pylintrc file".format(self.rcfile) + colorama.Fore.RESET ) if not os.path.isfile(self.rcfile): if not self._is_using_default_rcfile(): print(error_message) sys.exit(1) else: return config = configparser.ConfigParser() try: config.read(self.rcfile) except configparser.MissingSectionHeaderError: print(error_message) sys.exit(1) if config.has_section("MASTER") and config.get("MASTER", "ignore"): self.ignore_folders += config.get("MASTER", "ignore").split(",")
Parse the ignores setting from the pylintrc file if available.
6,748
def t_stringdollar_rbrace(self, t):
    r'\}'
    t.lexer.braces -= 1
    if t.lexer.braces == 0:
        t.lexer.begin()
r'\}'
6,749
def perform_update(self, serializer):
    instance = serializer.save()
    instance.history.create(data=instance.data)
creates a record in the `bulbs.promotion.PZoneHistory` :param obj: the instance saved :param created: boolean expressing if the object was newly created (`False` if updated)
6,750
def _compute_attenuation(self, rup, dists, imt, C): vec = np.ones(len(dists.rrup)) a1 = (np.log10(np.sqrt(dists.rrup ** 2.0 + C[] ** 2.0)), np.log10(70. * vec)) a = np.column_stack([a1[0], a1[1]]) b3 = (np.log10(np.sqrt(dists.rrup ** 2.0 + C[] ** 2.0) / (70. * vec)), np.log10((140. / 70.) * vec)) b2 = np.column_stack([b3[0], b3[1]]) b1 = ([np.min(b2, axis=1), 0. * vec]) b = np.column_stack([b1[0], b1[1]]) c1 = (np.log10(np.sqrt(dists.rrup ** 2.0 + C[] ** 2.0) / (140.) * vec), 0. * vec) c = np.column_stack([c1[0], c1[1]]) return (((C[] + C[] * rup.mag) * np.min(a, axis=1)) + ((C[] + C[] * rup.mag) * np.max(b, axis=1)) + ((C[] + C[] * rup.mag) * np.max(c, axis=1)))
Compute the second term of the equation described on p. 1866: " [(c4 + c5 * M) * min{ log10(R), log10(70.) }] + [(c4 + c5 * M) * max{ min{ log10(R/70.), log10(140./70.) }, 0.}] + [(c8 + c9 * M) * max{ log10(R/140.), 0}] "
6,751
def at(self, instant):
    for event in self:
        if event.begin <= instant <= event.end:
            yield event
Iterates (in chronological order) over all events that are occurring during `instant`. Args: instant (Arrow object)
6,752
def show_hide(self, *args): log.debug("Show_hide called") if self.forceHide: self.forceHide = False return if not HidePrevention(self.window).may_hide(): return if not self.win_prepare(): return if not self.window.get_property(): log.info("Showing the terminal") self.show() self.set_terminal_focus() return log.info("Hiding the terminal") self.hide()
Toggles the main window visibility
6,753
def bank_account_number(self):
    start = get_iban_spec(self.country_code).bban_split_pos + 4
    return self._id[start:]
Return the IBAN's Bank Account Number.
6,754
def stream_file(self, url, folder=None, filename=None, overwrite=False): path = self.get_path_for_url(url, folder, filename, overwrite) f = None try: f = open(path, ) for chunk in self.response.iter_content(chunk_size=10240): if chunk: f.write(chunk) f.flush() return f.name except Exception as e: raisefrom(DownloadError, % url, e) finally: if f: f.close()
Stream file from url and store in provided folder or temporary folder if no folder supplied. Must call setup method first. Args: url (str): URL to download filename (Optional[str]): Filename to use for downloaded file. Defaults to None (derive from the url). folder (Optional[str]): Folder to download it to. Defaults to None (temporary folder). overwrite (bool): Whether to overwrite existing file. Defaults to False. Returns: str: Path of downloaded file
6,755
def get_property(obj, name):
    if obj == None or name == None:
        return None
    names = name.split(".")
    if names == None or len(names) == 0:
        return None
    return RecursiveObjectReader._perform_get_property(obj, names, 0)
Recursively gets value of object or its subobjects property specified by its name. The object can be a user defined object, map or array. The property name correspondently must be object property, map key or array index. :param obj: an object to read property from. :param name: a name of the property to get. :return: the property value or null if property doesn't exist or introspection failed.
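A hedged usage sketch with invented sample data; per the docstring, map keys and array indices can both appear as path segments:

order = {'customer': {'name': 'Ada'}, 'items': [{'sku': 'A-1'}, {'sku': 'B-2'}]}

get_property(order, 'customer.name')   # 'Ada'
get_property(order, 'items.1.sku')     # 'B-2' (index as a path segment, per the docstring)
get_property(order, 'missing.key')     # None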
6,756
def finish( self, width=1, color=None, fill=None, roundCap=False, dashes=None, even_odd=False, morph=None, closePath=True ): if self.draw_cont == "": return color_str = ColorCode(color, "c") fill_str = ColorCode(fill, "f") if width != 1: self.draw_cont += "%g w\n" % width if roundCap: self.draw_cont += "%i J %i j\n" % (roundCap, roundCap) if dashes is not None and len(dashes) > 0: self.draw_cont += "%s d\n" % dashes if closePath: self.draw_cont += "h\n" self.lastPoint = None if color is not None: self.draw_cont += color_str if fill is not None: self.draw_cont += fill_str if not even_odd: self.draw_cont += "B\n" else: self.draw_cont += "B*\n" else: self.draw_cont += "S\n" if CheckMorph(morph): m1 = Matrix(1, 0, 0, 1, morph[0].x + self.x, self.height - morph[0].y - self.y) mat = ~m1 * morph[1] * m1 self.draw_cont = "%g %g %g %g %g %g cm\n" % JM_TUPLE(mat) + self.draw_cont self.totalcont += "\nq\n" + self.draw_cont + "Q\n" self.draw_cont = "" self.lastPoint = None return
Finish the current drawing segment. Notes: Apply stroke and fill colors, dashes, line style and width, or morphing. Also determines whether any open path should be closed by a connecting line to its start point.
6,757
def write_flows_to_gssha_time_series_xys(self, path_to_output_file, series_name, series_id, river_index=None, river_id=None, date_search_start=None, date_search_end=None, daily=False, filter_mode="mean"): if river_id is not None: river_index = self.get_river_index(river_id) elif river_id is None and river_index is None: raise ValueError(" Need reach id or reach index ...") self.raise_time_valid() qout_df = self.get_qout_index(river_index, date_search_start=date_search_start, date_search_end=date_search_end, daily=daily, filter_mode=filter_mode, as_dataframe=True) with open_csv(path_to_output_file, ) as out_ts: out_ts.write("XYS {0} {1} \"{2}\"\r\n".format(series_id, len(qout_df.index), series_name)) for index, pd_row in qout_df.iterrows(): date_str = index.strftime("%m/%d/%Y %I:%M:%S %p") out_ts.write("\"{0}\" {1:.5f}\n".format(date_str, pd_row[0]))
Write out RAPID output to GSSHA WMS time series xys file. Parameters ---------- path_to_output_file: str Path to the output xys file. series_name: str The name for the series. series_id: int The ID to give the series. river_index: :obj:`datetime.datetime`, optional This is the index of the river in the file you want the streamflow for. river_id: :obj:`datetime.datetime`, optional This is the river ID that you want the streamflow for. date_search_start: :obj:`datetime.datetime`, optional This is a datetime object with the date of the minimum date for starting. date_search_end: :obj:`datetime.datetime`, optional This is a datetime object with the date of the maximum date for ending. daily: bool, optional If True and the file is CF-Compliant, write out daily flows. filter_mode: str, optional You can get the daily average "mean" or the maximum "max". Defauls is "mean". Example writing entire time series to file: .. code:: python from RAPIDpy import RAPIDDataset river_id = 3624735 path_to_rapid_qout = '/path/to/Qout.nc' with RAPIDDataset(path_to_rapid_qout) as qout_nc: qout_nc.write_flows_to_gssha_time_series_xys( '/timeseries/Qout_{0}.xys'.format(river_id), series_name="RAPID_TO_GSSHA_{0}".format(river_id), series_id=34, river_id=river_id) Example writing entire time series as daily average to file: .. code:: python from RAPIDpy import RAPIDDataset river_id = 3624735 path_to_rapid_qout = '/path/to/Qout.nc' with RAPIDDataset(path_to_rapid_qout) as qout_nc: # NOTE: Getting the river index is not necessary # this is just an example of how to use this river_index = qout_nc.get_river_index(river_id) # if file is CF compliant, you can write out daily average qout_nc.write_flows_to_gssha_time_series_xys( '/timeseries/Qout_daily.xys', series_name="RAPID_TO_GSSHA_{0}".format(river_id), series_id=34, river_index=river_index, daily=True) Example writing subset of time series as daily maximum to file: .. code:: python from datetime import datetime from RAPIDpy import RAPIDDataset river_id = 3624735 path_to_rapid_qout = '/path/to/Qout.nc' with RAPIDDataset(path_to_rapid_qout) as qout_nc: # NOTE: Getting the river index is not necessary # this is just an example of how to use this river_index = qout_nc.get_river_index(river_id) # if file is CF compliant, you can filter by date and # get daily values qout_nc.write_flows_to_gssha_time_series_xys( '/timeseries/Qout_daily_date_filter.xys', series_name="RAPID_TO_GSSHA_{0}".format(river_id), series_id=34, river_index=river_index, date_search_start=datetime(2002, 8, 31), date_search_end=datetime(2002, 9, 15), daily=True, filter_mode="max")
6,758
def authorize(self, me, state=None, next_url=None, scope=):
    redirect_url = flask.url_for(
        self.flask_endpoint_for_function(self._authorized_handler),
        _external=True)
    return self._start_indieauth(me, redirect_url, state or next_url, scope)
Authorize a user via Micropub. Args: me (string): the authing user's URL. if it does not begin with https?://, http:// will be prepended. state (string, optional): passed through the whole auth process, useful if you want to maintain some state, e.g. the starting page to return to when auth is complete. next_url (string, optional): deprecated and replaced by the more general "state". still here for backward compatibility. scope (string, optional): a space-separated string of micropub scopes. 'read' by default. Returns: a redirect to the user's specified authorization https://indieauth.com/auth if none is provided.
6,759
def get_notify_observers_kwargs(self):
    return {
        : self._u_new,
        : self._x_new,
        : self._y_new,
        : self._z,
        : self._xi,
        : self._sigma,
        : self._t_new,
        : self.idx,
    }
Return the mapping between the metrics call and the iterated variables. Return ---------- notify_observers_kwargs: dict, the mapping between the iterated variables.
6,760
def cmp(self, junc, tolerance=0):
    if self.overlaps(junc, tolerance):
        return 0
    if self.left.chr == junc.right.chr:
        if self.left.start > junc.right.start:
            return -1
    if self.right.chr == junc.left.chr:
        if self.right.start < junc.right.start:
            return 1
    return 2
output comparison and allow for tolerance if desired * -1 if junc comes before self * 1 if junc comes after self * 0 if overlaps * 2 if else :param junc: :param tolerance: optional search space (default=0, no tolerance) :type junc: Junction :type tolerance: int :return: value of comparison :rtype: int
6,761
def fuzzybreaks(scale, breaks=None, boundary=None, binwidth=None, bins=30, right=True): if isinstance(scale, scale_discrete): breaks = scale.get_breaks() return -0.5 + np.arange(1, len(breaks)+2) else: if breaks is not None: breaks = scale.transform(breaks) if breaks is not None: return breaks recompute_bins = binwidth is not None srange = scale.limits if binwidth is None or np.isnan(binwidth): binwidth = (srange[1]-srange[0]) / bins if boundary is None or np.isnan(boundary): boundary = round_any(srange[0], binwidth, np.floor) if recompute_bins: bins = np.int(np.ceil((srange[1]-boundary)/binwidth)) breaks = np.arange(boundary, srange[1]+binwidth, binwidth) return _adjust_breaks(breaks, right)
Compute fuzzy breaks For a continuous scale, fuzzybreaks "preserve" the range of the scale. The fuzzing is close to numerical roundoff and is visually imperceptible. Parameters ---------- scale : scale Scale breaks : array_like Sequence of break points. If provided and the scale is not discrete, they are returned. boundary : float First break. If `None` a suitable on is computed using the range of the scale and the binwidth. binwidth : float Separation between the breaks bins : int Number of bins right : bool If `True` the right edges of the bins are part of the bin. If `False` then the left edges of the bins are part of the bin. Returns ------- out : array_like
6,762
def from_pypirc(pypi_repository): ret = {} pypirc_locations = PYPIRC_LOCATIONS for pypirc_path in pypirc_locations: pypirc_path = os.path.expanduser(pypirc_path) if os.path.isfile(pypirc_path): parser = configparser.SafeConfigParser() parser.read(pypirc_path) if not in parser.sections(): continue if not in parser.options(): continue if pypi_repository not in parser.get(, ): continue if pypi_repository in parser.sections(): for option in parser.options(pypi_repository): ret[option] = parser.get(pypi_repository, option) if not ret: raise ConfigError( .format(pypi_repository) + ) return ret
Load configuration from .pypirc file, cached to only run once
6,763
def set_gae_attributes(span):
    for env_var, attribute_key in GAE_ATTRIBUTES.items():
        attribute_value = os.environ.get(env_var)
        if attribute_value is not None:
            pair = {attribute_key: attribute_value}
            pair_attrs = Attributes(pair) \
                .format_attributes_json() \
                .get()
            _update_attr_map(span, pair_attrs)
Set the GAE environment common attributes.
6,764
def get_render(name, data, trans=):
    translation.activate(trans)
    config = loader.get_template(name)
    result = config.render(data).replace(, )
    translation.deactivate()
    return result
Render string based on template :param name: -- full template name :type name: str,unicode :param data: -- dict of rendered vars :type data: dict :param trans: -- translation for render. Default 'en'. :type trans: str,unicode :return: -- rendered string :rtype: str,unicode
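A hedged usage sketch (the template name and context are invented; assumes Django's template loader is configured):

body = get_render('mail/welcome.txt', {'user': 'ada'}, trans='de')
# Renders the template with the German translation active, then deactivates it.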
6,765
def findall(self, string):
    output = []
    for match in self.pattern.findall(string):
        if hasattr(match, ):
            match = [match]
        self._list_add(output, self.run(match))
    return output
Parse string, returning all outputs as parsed by functions
6,766
def _finishSphering(self):
    self._normOffset = self._samples.mean(axis=0) * -1.0
    self._samples += self._normOffset
    variance = self._samples.var(axis=0)
    variance[numpy.where(variance == 0.0)] = 1.0
    self._normScale = 1.0 / numpy.sqrt(variance)
    self._samples *= self._normScale
    for sampleIndex in range(len(self._labels)):
        self._knn.learn(self._samples[sampleIndex],
                        self._labels[sampleIndex],
                        self._partitions[sampleIndex])
Compute normalization constants for each feature dimension based on the collected training samples. Then normalize our training samples using these constants (so that each input dimension has mean and variance of zero and one, respectively.) Then feed these "sphered" training samples into the underlying SVM model.
6,767
def layout(self, slide):
    image = Image.new(, (WIDTH, HEIGHT), )
    draw = ImageDraw.Draw(image)
    draw.font = self.font
    self.vertical_layout(draw, slide)
    self.horizontal_layout(draw, slide)
    return slide
Return layout information for slide
6,768
def main(): project_root = utils.get_project_root() infofile = os.path.join(project_root, "raw-datasets/info.yml") logging.info("Read ...", infofile) with open(infofile, ) as ymlfile: datasets = yaml.load(ymlfile) for dataset in datasets: local_path_file = os.path.join(project_root, dataset[]) i = 0 while not is_file_consistent(local_path_file, dataset[]) and i < 3: if os.path.isfile(local_path_file): local_file_size = os.path.getsize(local_path_file) logging.info("MD5 codes differ. ") logging.info("The file size of the downloaded file is %s.", utils.sizeof_fmt(local_file_size)) logging.info("Download the file ...", dataset[]) urllib.urlretrieve(dataset[], local_path_file) i += 1 if i < 10: logging.info("Found .", dataset[])
Main part of the download script.
6,769
def config(): config = get_config() print(.format(click.style(__version__, bold=True))) print(.format(click.style(str(config.endpoint), bold=True))) print(.format(click.style(config.version, bold=True))) print(.format(click.style(config.access_key, bold=True))) masked_skey = config.secret_key[:6] + ( * 24) + config.secret_key[-10:] print(.format(click.style(masked_skey, bold=True))) print(.format( click.style(config.hash_type, bold=True))) print(.format( click.style(str(config.skip_sslcert_validation), bold=True)))
Shows the current configuration.
6,770
async def retrieve(self, url, **kwargs):
    try:
        async with self.websession.request(, url, **kwargs) as res:
            if res.status != 200:
                raise Exception("Could not retrieve information from API")
            if res.content_type == :
                return await res.json()
            return await res.text()
    except aiohttp.ClientError as err:
        logging.error(err)
Issue API requests.
6,771
def parse_variable(self, variable):
    data = None
    if variable is not None:
        variable = variable.strip()
        if re.match(self._variable_match, variable):
            var = re.search(self._variable_parse, variable)
            data = {
                : var.group(0),
                : var.group(2),
                : var.group(3),
                : var.group(4),
            }
    return data
Method to parse an input or output variable. **Example Variable**:: #App:1234:output!String Args: variable (string): The variable name to parse. Returns: (dictionary): Result of parsed string.
6,772
def lowercase_to_camelcase(python_input, camelcase_input=None):
    if camelcase_input:
        if python_input.__class__ != camelcase_input.__class__:
            raise ValueError( % (camelcase_input.__class__, python_input.__class__))
    if isinstance(python_input, dict):
        return _to_camelcase_dict(python_input, camelcase_input)
    elif isinstance(python_input, list):
        return _ingest_list(python_input, _to_camelcase_dict, camelcase_input)
    else:
        return python_input
a function to recursively convert data with lowercase key names into camelcase keys :param camelcase_input: list or dictionary with lowercase keys :param python_input: [optional] list or dictionary with default camelcase keys in output :return: dictionary with camelcase key names
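A hedged illustration of the intended conversion (the helpers _to_camelcase_dict and _ingest_list are not shown in this row, so the exact output shape is an assumption based on the docstring):

python_input    = {'first_name': 'Ada', 'last_name': 'Lovelace'}
camelcase_input = {'firstName': '', 'lastName': ''}   # supplies the target key names

lowercase_to_camelcase(python_input, camelcase_input)
# expected: {'firstName': 'Ada', 'lastName': 'Lovelace'}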
6,773
def _is_number_match_OO(numobj1_in, numobj2_in): numobj1 = _copy_core_fields_only(numobj1_in) numobj2 = _copy_core_fields_only(numobj2_in) if (numobj1.extension is not None and numobj2.extension is not None and numobj1.extension != numobj2.extension): return MatchType.NO_MATCH country_code1 = numobj1.country_code country_code2 = numobj2.country_code if country_code1 != 0 and country_code2 != 0: if numobj1 == numobj2: return MatchType.EXACT_MATCH elif (country_code1 == country_code2 and _is_national_number_suffix_of_other(numobj1, numobj2)): return MatchType.SHORT_NSN_MATCH return MatchType.NO_MATCH numobj1.country_code = country_code2 if numobj1 == numobj2: return MatchType.NSN_MATCH if _is_national_number_suffix_of_other(numobj1, numobj2): return MatchType.SHORT_NSN_MATCH return MatchType.NO_MATCH
Takes two phone number objects and compares them for equality.
6,774
def _get_temperature(self, data):
    temp = (data[2] & ~(1 << 7)) + (data[3] / 100)
    sign = (data[2] >> 7) & 1
    if sign == 0:
        return round(temp, 2)
    return round(-1 * temp, 2)
Return temperature in celsius
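A worked example of the decoding above (Python 3 division assumed): with data[2] = 0x96 and data[3] = 45, the low seven bits give 22, the hundredths byte adds 0.45, and the set sign bit makes the result negative:

data = [0x00, 0x00, 0x96, 45]
# 0x96 & 0x7F == 22, 45 / 100 == 0.45, sign bit (0x96 >> 7) & 1 == 1
sensor._get_temperature(data)   # -22.45, where sensor is any instance of the owning class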
6,775
def _uptime_windows(): if hasattr(ctypes, ) and hasattr(ctypes.windll, ): lib = ctypes.windll.kernel32 else: try: lib = ctypes.CDLL() except (AttributeError, OSError): return None if hasattr(lib, ): lib.GetTickCount64.restype = ctypes.c_uint64 return lib.GetTickCount64() / 1000. if hasattr(lib, ): lib.GetTickCount.restype = ctypes.c_uint32 return lib.GetTickCount() / 1000. return None
Returns uptime in seconds or None, on Windows. Warning: may return incorrect answers after 49.7 days on versions older than Vista.
6,776
def validate_arguments(self, start_date, end_date, **kwargs):
    if set(kwargs) < set(self.required_filters):
        raise InvalidRequestInputError(
            .format(set(self.required_filters.keys()), self.query_name)
        )
Validate query arguments.
6,777
def decodeMessage(self, data):
    message = proto_pb2.Msg()
    message.ParseFromString(data)
    return message
Decode a protobuf message into a list of Tensor events
6,778
def zone_schedules_restore(self, filename): _LOGGER.info("Restoring schedules to ControlSystem %s (%s)...", self.systemId, self.location) _LOGGER.info("Reading from backup file: %s...", filename) with open(filename, ) as file_input: schedule_db = file_input.read() schedules = json.loads(schedule_db) for zone_id, zone_schedule in schedules.items(): name = zone_schedule[] zone_info = zone_schedule[] _LOGGER.info("Restoring schedule for: %s - %s...", zone_id, name) if self.hotwater and self.hotwater.zoneId == zone_id: self.hotwater.set_schedule(json.dumps(zone_info)) else: self.zones_by_id[zone_id].set_schedule( json.dumps(zone_info)) _LOGGER.info("Restore completed.")
Restore all zones on control system from the given file.
6,779
def signed_session(self, session=None):
    self.set_token()
    return super(MSIAuthentication, self).signed_session(session)
Create requests session with any required auth headers applied. If a session object is provided, configure it directly. Otherwise, create a new session and return it. :param session: The session to configure for authentication :type session: requests.Session :rtype: requests.Session
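Typical use with msrestazure on an Azure resource that has a managed identity assigned; a hedged sketch, since the surrounding class is only partially shown here and the call only succeeds when the local MSI endpoint is available:

from msrestazure.azure_active_directory import MSIAuthentication

creds = MSIAuthentication()          # obtains a token from the instance metadata endpoint
session = creds.signed_session()     # a requests.Session with the Authorization header applied
resp = session.get("https://management.azure.com/subscriptions?api-version=2019-06-01")
print(resp.status_code)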
6,780
def UNTL_to_encodedUNTL(subject): subject = normalize_UNTL(subject) subject = subject.replace(, ) subject = subject.replace(, ) return subject
Normalize a UNTL subject heading to be used in SOLR.
6,781
def _gcs_delete(args, _):
    # argument keys ('bucket' / 'object') are assumed; the original literals were lost
    objects = _expand_list(args['bucket'])
    objects.extend(_expand_list(args['object']))
    errs = []
    for obj in objects:
        try:
            bucket, key = google.datalab.storage._bucket.parse_name(obj)
            if bucket and key:
                gcs_object = google.datalab.storage.Object(bucket, key)
                if gcs_object.exists():
                    google.datalab.storage.Object(bucket, key).delete()
                else:
                    errs.append("%s does not exist" % obj)
            elif bucket:
                gcs_bucket = google.datalab.storage.Bucket(bucket)
                if gcs_bucket.exists():
                    gcs_bucket.delete()
                else:
                    errs.append("%s does not exist" % obj)
            else:
                raise Exception("Can't parse storage name %s" % obj)
        except Exception as e:
            # except clause reconstructed so that 'e' is defined where the message uses it
            errs.append("Couldn't delete %s: %s" %
                        (obj, _extract_gcs_api_response_error(str(e))))
    if errs:
        raise Exception('\n'.join(errs))
Delete one or more buckets or objects.
6,782
def serialize_on_parent( self, parent, value, state ): xml_value = _hooks_apply_before_serialize(self._hooks, state, value) self._processor.serialize_on_parent(parent, xml_value, state)
Serialize the value directly on the parent.
6,783
def validate(self, pkt, messages=None):
    valid = True
    for f in self.fields:
        try:
            value = getattr(pkt, f.name)
        except AttributeError:
            valid = False
            if messages is not None:
                # first placeholder restored so the message matches its two format values
                msg = "Telemetry field mismatch for packet '%s'. "
                msg += "Unable to retrieve value for %s in Packet."
                values = self.name, f.name
                messages.append(msg % values)
            break
        if f.validate(value, messages) is False:
            valid = False
    return valid
Returns True if the given Packet is valid, False otherwise. Validation error messages are appended to an optional messages array.
6,784
def schaffer(self, x): N = len(x) s = x[0:N - 1]**2 + x[1:N]**2 return sum(s**0.25 * (np.sin(50 * s**0.1)**2 + 1))
Schaffer function x0 in [-100..100]
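A standalone NumPy version of the same objective, useful for checking a single point outside the class context (the test point is arbitrary):

import numpy as np

def schaffer(x):
    # sum over consecutive coordinate pairs of s**0.25 * (sin(50 * s**0.1)**2 + 1), s = x_i^2 + x_{i+1}^2
    x = np.asarray(x, dtype=float)
    s = x[:-1]**2 + x[1:]**2
    return float(np.sum(s**0.25 * (np.sin(50 * s**0.1)**2 + 1)))

print(schaffer([1.0, 2.0, 3.0]))   # objective value for a 3-dimensional point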
6,785
def _latex_circuit_drawer(circuit, scale=0.7, filename=None, style=None, plot_barriers=True, reverse_bits=False, justify=None): tmpfilename = with tempfile.TemporaryDirectory() as tmpdirname: tmppath = os.path.join(tmpdirname, tmpfilename + ) _generate_latex_source(circuit, filename=tmppath, scale=scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify) image = None try: subprocess.run(["pdflatex", "-halt-on-error", "-output-directory={}".format(tmpdirname), "{}".format(tmpfilename + )], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, check=True) except OSError as ex: if ex.errno == errno.ENOENT: logger.warning( ) raise except subprocess.CalledProcessError as ex: with open(, ) as error_file: error_file.write(ex.stdout) logger.warning( ) raise else: try: base = os.path.join(tmpdirname, tmpfilename) subprocess.run(["pdftocairo", "-singlefile", "-png", "-q", base + , base]) image = Image.open(base + ) image = utils._trim(image) os.remove(base + ) if filename: image.save(filename, ) except OSError as ex: if ex.errno == errno.ENOENT: logger.warning( ) raise return image
Draw a quantum circuit based on latex (Qcircuit package) Requires version >=2.6.0 of the qcircuit LaTeX package. Args: circuit (QuantumCircuit): a quantum circuit scale (float): scaling factor filename (str): file path to save image to style (dict or str): dictionary of style or file name of style file reverse_bits (bool): When set to True reverse the bit order inside registers for the output visualization. plot_barriers (bool): Enable/disable drawing barriers in the output circuit. Defaults to True. justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how the circuit should be justified. Returns: PIL.Image: an in-memory representation of the circuit diagram Raises: OSError: usually indicates that ```pdflatex``` or ```pdftocairo``` is missing. CalledProcessError: usually points errors during diagram creation.
6,786
def getCandScoresMapBruteForce(self, profile):
    wmg = profile.getWmg(True)
    m = len(wmg.keys())
    cands = range(m)
    V = self.createBinaryRelation(m)
    gains = dict()
    for cand in wmg.keys():
        gains[cand] = 0
    # integer division so that 'repeat' is an int; enumerate every orientation of the pairwise graph
    graphs = itertools.product(range(2), repeat=m * (m - 1) // 2)
    for comb in graphs:
        prob = 1
        i = 0
        for a, b in itertools.combinations(cands, 2):
            V[a][b] = comb[i]
            V[b][a] = 1 - comb[i]
            if comb[i] > 0:
                prob *= 1 / (1 + self.phi ** float(wmg[a + 1][b + 1]))
            else:
                prob *= 1 / (1 + self.phi ** float(wmg[b + 1][a + 1]))
            i += 1
            if i >= m * (m - 1) // 2:
                break
        for cand in wmg.keys():
            gains[cand] += self.utilityFunction.getUtility([cand], V) * prob
    return gains
Returns a dictionary that associates the integer representation of each candidate with the Bayesian losses computed by brute force. :ivar Profile profile: A Profile object that represents an election profile.
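The brute force enumerates every labelled tournament on m candidates, i.e. all 2^(m(m-1)/2) orientations of the complete pairwise-comparison graph, so it is only feasible for very small m. A standalone count/enumeration sketch:

import itertools

m = 4
n_pairs = m * (m - 1) // 2
print(2 ** n_pairs)                        # 64 tournaments for m = 4

# each combination orients every pair (a, b); 1 means a beats b
for comb in itertools.product(range(2), repeat=n_pairs):
    pass  # build the adjacency matrix V here, as in the method above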
6,787
def scan_module(self, modpath, node): used_origins = self.map.setdefault(modpath, set()) def get_origins(modpath, name): origins = set() def walk_origins(modpath, name): for origin in self.import_map.get_origins(modpath, name): if origin not in origins: origins.add(origin) if in origin: walk_origins(*origin.rsplit(, 1)) walk_origins(modpath, name) return origins def get_origins_for_node(node): if node_type(node) == and node_type(node.ctx) == : return {modpath + + node.id} | get_origins(modpath, node.id) if node_type(node) == and node_type(node.ctx) == : return set.union(set(), *[ {parent + + node.attr} | get_origins(parent, node.attr) for parent in get_origins_for_node(node.value)]) return set() def get_origins_used_by_node(node): if node_type(node) == : return get_origins_for_node(node) if node_type(node) == : return set.union(get_origins_used_by_node(node.value), get_origins_for_node(node)) return set() def scan_loads(node): if node_type(node) in [, ]: used_origins.update(get_origins_used_by_node(node)) for_each_child(node, scan_loads) for_each_child(node, scan_loads) intermediate_origins = set() for origin in used_origins: parts = origin.split() for i in range(1, len(parts)): intermediate_origins.add(.join(parts[:i])) used_origins.update(intermediate_origins)
Scans a module, collecting all used origins, assuming that modules are obtained only by dotted paths and no other kinds of expressions.
6,788
def Nu_Kitoh(Re, Pr, H=None, G=None, q=None):
    if H and G and q:
        qht = 200.*G**1.2
        if H < 1.5E6:
            fc = 2.9E-8 + 0.11/qht
        elif 1.5E6 <= H <= 3.3E6:
            fc = -8.7E-8 - 0.65/qht
        else:
            fc = -9.7E-7 + 1.3/qht
        m = 0.69 - 81000./qht + fc*q
    else:
        m = 0.69
    return 0.015*Re**0.85*Pr**m
r'''Calculates internal convection Nusselt number for turbulent vertical upward flow in a pipe under supercritical conditions according to [1]_, also shown in [2]_, [3]_ and [4]_. Depends on fluid enthalpy, mass flux, and heat flux. .. math:: Nu_b = 0.015Re_b^{0.85} Pr_b^m m = 0.69 - \frac{81000}{q_{dht}} + f_cq q_{dht} = 200 G^{1.2} f_c = 2.9\times10^{-8} + \frac{0.11}{q_{dht}} \text{ for } H_b < 1500 \text{ kJ/kg} f_c = -8.7\times10^{-8} - \frac{0.65}{q_{dht}} \text{ for } 1500 \text{ kJ/kg} < H_b < 3300 \text{ kJ/kg} f_c = -9.7\times10^{-7} + \frac{1.3}{q_{dht}} \text{ for } H_b > 3300 \text{ kJ/kg} Parameters ---------- Re : float Reynolds number with bulk fluid properties, [-] Pr : float Prandtl number with bulk fluid properties, [-] H : float, optional Enthalpy of water (if the fluid is water), [J/kg] G : float, optional Mass flux of the fluid, [kg/m^2/s] q : float, optional Heat flux to wall, [W/m^2] Returns ------- Nu : float Nusselt number as explained below, [-] Notes ----- The reference point for the enthalpy values is not stated in [1]_. The upper and lower enthalpy limits for this correlation are 4000 kJ/kg and 0 kJ/kg, but these are not enforced in this function. If not all of H, G, and q are provided, the correlation is used without the correction. This correlation was ranked 6th best in [3]_, and found 4th best for enhanced heat transfer in [2]_ with a MAD of 12.3%. For the data used to develop the correlation, G varied from 100-1750 kg/m^2/s, q varied from 0 to 1800 kW/m^2, and bulk temperature varied from 20 to 550 decrees Celsius. This correlation does not have realistic behavior for values outside those used in the study, and should not be used. Examples -------- >>> Nu_Kitoh(1E5, 1.2, 1.3E6, 1500, 5E6) 331.80234139591306 References ---------- .. [1] Kitoh, Kazuaki, Seiichi Koshizuka, and Yoshiaki Oka. "Refinement of Transient Criteria and Safety Analysis for a High-Temperature Reactor Cooled by Supercritical Water." Nuclear Technology 135, no. 3 (September 1, 2001): 252-64. .. [2] Chen, Weiwei, Xiande Fang, Yu Xu, and Xianghui Su. "An Assessment of Correlations of Forced Convection Heat Transfer to Water at Supercritical Pressure." Annals of Nuclear Energy 76 (February 2015): 451-60. doi:10.1016/j.anucene.2014.10.027. .. [3] Yu, Jiyang, Baoshan Jia, Dan Wu, and Daling Wang. "Optimization of Heat Transfer Coefficient Correlation at Supercritical Pressure Using Genetic Algorithms." Heat and Mass Transfer 45, no. 6 (January 8, 2009): 757-66. doi:10.1007/s00231-008-0475-4. .. [4] Jäger, Wadim, Victor Hugo Sánchez Espinoza, and Antonio Hurtado. "Review and Proposal for Heat Transfer Predictions at Supercritical Water Conditions Using Existing Correlations and Experiments." Nuclear Engineering and Design, (W3MDM) University of Leeds International Symposium: What Where When? Multi-dimensional Advances for Industrial Process Monitoring, 241, no. 6 (June 2011): 2184-2203. doi:10.1016/j.nucengdes.2011.03.022.
6,789
def raises(self, expected_exception): return unittest_case.assertRaises(expected_exception, self._orig_subject, *self._args, **self._kwargs)
Ensures preceding predicates (specifically, :meth:`called_with()`) result in *expected_exception* being raised.
6,790
def run(cmd, data=None, checks=None, region=None, log_error=True, log_stdout=False): try: logger.debug(" ".join(str(x) for x in cmd) if not isinstance(cmd, basestring) else cmd) _do_run(cmd, checks, log_stdout) except: if log_error: logger.info("error at command") raise
Run the provided command, logging details and checking for errors.
6,791
def install_cache(expire_after=12 * 3600, cache_post=False):
    # HTTP method names assumed ('GET' default, 'POST' when cache_post); originals were lost
    allowable_methods = ['GET']
    if cache_post:
        allowable_methods.append('POST')
    requests_cache.install_cache(
        expire_after=expire_after,
        allowable_methods=allowable_methods)
Patches the requests library with requests_cache.
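Typical call from user code, assuming a recent requests_cache release; once installed, ordinary requests.get() calls are served from the cache transparently:

import requests
import requests_cache

requests_cache.install_cache(expire_after=6 * 3600)    # cache GET responses for six hours
r1 = requests.get("https://httpbin.org/get")
r2 = requests.get("https://httpbin.org/get")
print(getattr(r2, "from_cache", False))                # True when the second call hit the cache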
6,792
def unsubscribe(self, connection, destination): self.log.debug("Unsubscribing %s from %s" % (connection, destination)) if connection in self._topics[destination]: self._topics[destination].remove(connection) if not self._topics[destination]: del self._topics[destination]
Unsubscribes a connection from the specified topic destination. @param connection: The client connection to unsubscribe. @type connection: L{coilmq.server.StompConnection} @param destination: The topic destination (e.g. '/topic/foo') @type destination: C{str}
6,793
def translate_expression(expression): logger_ts.info("enter translate_expression") m = re_filter_expr.findall(expression) matches = [] if m: for i in m: logger_ts.info("parse match: {}".format(i)) tmp = list(i[1:]) if tmp[1] in COMPARISONS: tmp[1] = COMPARISONS[tmp[1]] tmp[0] = cast_float(tmp[0]) tmp[2] = cast_float(tmp[2]) matches.append(tmp) else: logger_ts.warn("translate_expression: invalid expression: {}".format(expression)) print("Invalid input expression") logger_ts.info("exit translate_expression") return matches
Check whether the expression is valid, then turn it into a form that can be used for filtering. :return list of lists: One or more matches. Each inner list holds three items: variable, comparison operator, and value.
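An illustrative standalone parser for the same idea. The real re_filter_expr pattern and the COMPARISONS table (which also maps word operators such as "is" and "greater than" to symbols) belong to the library and are not shown, so this sketch guesses at the shape of the output:

import re

pattern = re.compile(r"(\w+)\s*(==|>=|<=|>|<)\s*([\d.]+)")

def translate(expression):
    matches = []
    for var, op, val in pattern.findall(expression):
        matches.append([var, op, float(val)])
    return matches

print(translate("temp0 > 30 && d18O <= 2.5"))
# [['temp0', '>', 30.0], ['d18O', '<=', 2.5]]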
6,794
def date(self): if self.commit_time: return datetime.utcfromtimestamp(self.commit_time) else: return datetime.now()
:return: datetime object
6,795
def And(*predicates, **kwargs): if kwargs: predicates += Query(**kwargs), return _flatten(_And, *predicates)
`And` predicate. Returns ``False`` at the first sub-predicate that returns ``False``.
6,796
def home_shift_summ(self): if not self.__wrapped_home: self.__wrapped_home = self.__wrap(self._home.by_player) return self.__wrapped_home
:returns: :py:class:`.ShiftSummary` by player for the home team :rtype: dict ``{ player_num: shift_summary_obj }``
6,797
def find_files(self, ID=None, fileGrp=None, pageId=None, mimetype=None,
               url=None, local_only=False):
    ret = []
    # XPath attribute fragments reconstructed from the parameter names; exact literals were lost
    fileGrp_clause = '' if fileGrp is None else '[@USE="%s"]' % fileGrp
    file_clause = ''
    if ID is not None:
        file_clause += '[@ID="%s"]' % ID
    if mimetype is not None:
        file_clause += '[@MIMETYPE="%s"]' % mimetype
    if url is not None:
        file_clause += '[mets:FLocat/@xlink:href="%s"]' % url
    file_ids = self._tree.getroot().xpath(
        "//mets:fileGrp%s/mets:file%s/@ID" % (fileGrp_clause, file_clause),
        namespaces=NS)
    if pageId is not None:
        # page lookup reconstructed: keep only file ids referenced by the matching physical page
        by_pageid = self._tree.getroot().xpath(
            '//mets:div[@TYPE="page"][@ID="%s"]/mets:fptr/@FILEID' % pageId,
            namespaces=NS)
        file_ids = [i for i in by_pageid if i in file_ids]
    for file_id in file_ids:
        el = self._tree.getroot().find('.//mets:file[@ID="%s"]' % file_id, NS)
        if file_id not in self._file_by_id:
            self._file_by_id[file_id] = OcrdFile(el, mets=self)
        url = self._file_by_id[file_id].url
        if local_only and not (url.startswith('file://') or '://' not in url):
            continue
        ret.append(self._file_by_id[file_id])
    return ret
Search ``mets:file`` in this METS document. Args: ID (string) : ID of the file fileGrp (string) : USE of the fileGrp to list files of pageId (string) : ID of physical page manifested by matching files url (string) : @xlink:href of mets:FLocat of mets:file mimetype (string) : MIMETYPE of matching files local_only (boolean) : Whether to restrict results to local files, i.e. file://-URLs Return: List of files.
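The lookup is plain XPath over the METS tree; a self-contained sketch of the same filtering with xml.etree (the sample document, fileGrp name, and MIME type are made up):

import xml.etree.ElementTree as ET

NS = {"mets": "http://www.loc.gov/METS/"}
doc = """<mets:mets xmlns:mets="http://www.loc.gov/METS/">
  <mets:fileSec>
    <mets:fileGrp USE="OCR-D-IMG">
      <mets:file ID="f1" MIMETYPE="image/png"/>
      <mets:file ID="f2" MIMETYPE="image/tiff"/>
    </mets:fileGrp>
  </mets:fileSec>
</mets:mets>"""

root = ET.fromstring(doc)
# same idea as fileGrp_clause / file_clause above: predicates on USE and MIMETYPE
matches = root.findall(
    ".//mets:fileGrp[@USE='OCR-D-IMG']/mets:file[@MIMETYPE='image/png']", NS)
print([el.get("ID") for el in matches])   # ['f1']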
6,798
def vstackm(matrices): arr = np_vstack(tuple(m.matrix for m in matrices)) return Matrix(arr)
Generalizes `numpy.vstack` to :class:`Matrix` objects.
6,799
def noinfo(self, msg, oname): print % msg, if oname: print % oname else: print
Generic message when no information is found.