Dataset preview columns: Unnamed: 0 (int64, values 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k).
16,700
def get_tagged(self, event): self.log("Tagged objects request for", event.data, "from", event.user, lvl=debug) if event.data in self.tags: tagged = self._get_tagged(event.data) response = { : , : , : tagged } self.fireEvent(send(event.client.uuid, response)) else: self.log("Unavailable schema requested!", lvl=warn)
Return a list of tagged objects for a schema
16,701
def set_isotopic_ratio(self, compound=, element=, list_ratio=[]): _stack = self.stack list_compounds = _stack.keys() if compound not in _stack.keys(): list_compounds_joined = .join(list_compounds) raise ValueError("Compound could not be find in {}".format(compound, list_compounds_joined)) if element == : element = compound list_element = _stack[compound].keys() if element not in list_element: list_element_joined = .join(list_element) raise ValueError("Element should be any of those elements: {}".format(element, list_element_joined)) old_list_ratio = _stack[compound][element][][] if not (len(old_list_ratio) == len(list_ratio)): raise ValueError("New list of ratio ({} elements) does not match old list size ({} elements!".format(len( list_ratio), len(old_list_ratio))) self.stack[compound][element][][] = list_ratio self.__update_molar_mass(compound=compound, element=element) self.__update_density(compound=compound, element=element) self.__math_on_stack()
Defines the new set of ratios for the compound/element and triggers the calculation to update the density. Parameters: =========== compound: string (default is ''). Name of compound. element: string (default is ''). Name of element. list_ratio: list (default is []). List of new stoichiometric ratios. Raises: ======= ValueError if compound does not exist; ValueError if element does not exist; ValueError if list_ratio does not have the right format.
16,702
def calc_fwhm_gaussian(self, arr1d, medv=None, gauss_fn=None):
    if gauss_fn is None:
        gauss_fn = self.gaussian
    N = len(arr1d)
    X = np.array(list(range(N)))
    Y = arr1d
    if medv is None:
        medv = get_median(Y)
    Y = Y - medv
    maxv = Y.max()
    Y = Y.clip(0, maxv)
    p0 = [0, N - 1, maxv]
    errfunc = lambda p, x, y: gauss_fn(x, p) - y
    with self.lock:
        p1, success = optimize.leastsq(errfunc, p0[:], args=(X, Y))
    if not success:
        raise IQCalcError("FWHM gaussian fitting failed")
    mu, sdev, maxv = p1
    self.logger.debug("mu=%f sdev=%f maxv=%f" % (mu, sdev, maxv))
    fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) * sdev
    fwhm = float(fwhm)
    mu = float(mu)
    sdev = float(sdev)
    maxv = float(maxv)
    res = Bunch.Bunch(fwhm=fwhm, mu=mu, sdev=sdev, maxv=maxv,
                      fit_fn=gauss_fn, fit_args=[mu, sdev, maxv])
    return res
FWHM calculation on a 1D array by using least square fitting of a gaussian function on the data. arr1d is a 1D array cut in either X or Y direction on the object.
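The constant used in the code above follows directly from the gaussian profile itself: the full width at half maximum of a gaussian with standard deviation sigma is FWHM = 2 * sqrt(2 * ln 2) * sigma ≈ 2.3548 * sigma, which is why the fitted sdev is multiplied by 2.0 * np.sqrt(2.0 * np.log(2.0)).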
16,703
def convert_data_to_ndarray(self):
    if self._data_structure != "DataFrame":
        raise Exception(f"Data is not a DataFrame but {self._data_structure}.")
    self._data = self._convert_to_ndarray(self._data)
    self._update_data_structure()
    return self
Converts the data from dataframe to ndarray format. Assumption: df-columns are ndarray-layers (3rd dim.)
16,704
def duplicate(self, host): duplicates = [] safe_key_value = re.sub(r + "`~!$%^&*\"|]+_$$check_commandaggregationevent_handler$$service_dependencies$$', key_value[key] ) duplicates.append(new_s) return duplicates
For a given host, look for all copies we must create for the for_each property :param host: alignak host object :type host: alignak.objects.host.Host :return: list :rtype: list
16,705
def getLeader(self, vehID, dist=0.):
    self._connection._beginMessage(
        tc.CMD_GET_VEHICLE_VARIABLE, tc.VAR_LEADER, vehID, 1 + 8)
    self._connection._string += struct.pack("!Bd", tc.TYPE_DOUBLE, dist)
    return _readLeader(self._connection._checkResult(
        tc.CMD_GET_VEHICLE_VARIABLE, tc.VAR_LEADER, vehID))
getLeader(string, double) -> (string, double) Return the leading vehicle id together with the distance. The distance is measured from the front + minGap to the back of the leader, so it does not include the minGap of the vehicle. The dist parameter defines the maximum lookahead, 0 calculates a lookahead from the brake gap. Note that the returned leader may be farther away than the given dist.
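For orientation, a minimal usage sketch of this call through SUMO's Python TraCI client. The port number and the vehicle ID "ego" are illustrative assumptions, not from the source, and the exact "no leader" return value differs between SUMO versions:

import traci

traci.init(port=8813)      # assumes a SUMO instance started with --remote-port 8813
traci.simulationStep()
# Leader of vehicle "ego" within a 100 m lookahead.
leader = traci.vehicle.getLeader("ego", dist=100.0)
if leader:
    leader_id, gap = leader
    print("leader %s is %.1f m ahead" % (leader_id, gap))
traci.close()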
16,706
def allow(self, ctx, acls):
    for acl in acls:
        if self._identity == acl:
            return True
    return False
Allow access to any ACL member that is equal to the user name. That is, some user u is considered a member of group u and no other.
16,707
def _check_tcpdump():
    # The open() mode literal was lost in extraction; write-binary is assumed here.
    with open(os.devnull, "wb") as devnull:
        try:
            proc = subprocess.Popen([conf.prog.tcpdump, "--version"],
                                    stdout=devnull, stderr=subprocess.STDOUT)
        except OSError:
            return False
    if OPENBSD:
        return proc.wait() == 1
    else:
        return proc.wait() == 0
Return True if the tcpdump command can be started
16,708
def _do_synchronise_jobs(walltime, machines): offset = SYNCHRONISATION_OFFSET start = time.time() + offset _t = time.strptime(walltime, "%H:%M:%S") _walltime = _t.tm_hour * 3600 + _t.tm_min * 60 + _t.tm_sec demands = defaultdict(int) for machine in machines: cluster = machine["cluster"] demands[cluster] += machine["nodes"] if len(list(demands.keys())) <= 1: logger.debug("Only one cluster detected: no synchronisation needed") return None clusters = clusters_sites_obj(list(demands.keys())) sites = set(list(clusters.values())) if len(sites) <= 1: logger.debug("Only one site detected: no synchronisation needed") return None ok = True for cluster, nodes in demands.items(): cluster_status = clusters[cluster].status.list() ok = ok and can_start_on_cluster(cluster_status.nodes, nodes, start, _walltime) if not ok: break if ok: logger.info("Reservation_date=%s (%s)" % (_date2h(start), sites)) return start if start is None: raise EnosG5kSynchronisationError(sites)
This returns a common reservation date for all the jobs. This reservation date is really only a hint and will be supplied to each OAR server. Without this *common* reservation_date, one OAR server can decide to postpone the start of the job while the others are already running. This doesn't prevent the start of a job on one site from drifting (e.g. because the machines need to be restarted), but the drift shouldn't exceed a few minutes.
16,709
def _validate_message(self, message): if message.tag != : status = -1 raise exception.IloClientInternalError(message, status) for child in message: if child.tag != : return message status = int(child.get(), 16) msg = child.get() if status == 0 and msg != : return msg if status != 0: if in msg or in msg: for cmd in BOOT_MODE_CMDS: if cmd in msg: platform = self.get_product_name() msg = ("%(cmd)s is not supported on %(platform)s" % {: cmd, : platform}) LOG.debug(self._("Got invalid response with " "message: "), {: msg}) raise (exception.IloCommandNotSupportedError (msg, status)) else: LOG.debug(self._("Got invalid response with " "message: "), {: msg}) raise exception.IloClientInternalError(msg, status) if (status in exception.IloLoginFailError.statuses or msg in exception.IloLoginFailError.messages): LOG.debug(self._("Got invalid response with " "message: "), {: msg}) raise exception.IloLoginFailError(msg, status) LOG.debug(self._("Got invalid response with " "message: "), {: msg}) raise exception.IloError(msg, status)
Validate XML response from iLO. This function validates the XML response to see if the exit status is 0 or not in the response. If the status is non-zero it raises exception.
16,710
def clip_or_fit_solutions(self, pop, idx):
    for k in idx:
        self.repair_genotype(pop[k])
Make sure that solutions fit the sample distribution; this interface will probably change. In particular, the frequency of long vectors appearing in pop[idx] - self.mean is limited.
16,711
async def deserialize(data: dict): return await Proof._deserialize("vcx_proof_deserialize", json.dumps(data), data.get().get())
Builds a Proof object with defined attributes. Attributes are provided by a previous call to the serialize function. :param data: Example: name = "proof name" requested_attrs = [{"name": "age", "restrictions": [{"schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766" } ] }, { "name":"name", "restrictions": [ { "schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766"}]}] proof = await Proof.create(source_id, name, requested_attrs) data = proof.serialize() proof2 = await Proof.deserialize(data) :return: Proof Object
16,712
def start_heron_tools(masters, cl_args): single_master = list(masters)[0] wait_for_master_to_start(single_master) cmd = "%s run %s >> /tmp/heron_tools_start.log 2>&1 &" \ % (get_nomad_path(cl_args), get_heron_tools_job_file(cl_args)) Log.info("Starting Heron Tools on %s" % single_master) if not is_self(single_master): cmd = ssh_remote_execute(cmd, single_master, cl_args) Log.debug(cmd) pid = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return_code = pid.wait() output = pid.communicate() Log.debug("return code: %s output: %s" % (return_code, output)) if return_code != 0: Log.error("Failed to start Heron Tools on %s with error:\n%s" % (single_master, output[1])) sys.exit(-1) wait_for_job_to_start(single_master, "heron-tools") Log.info("Done starting Heron Tools")
Start Heron tracker and UI
16,713
def _request(self, method, uri_relative, request_bytes, params, custom_headers): uri_relative_with_params = self._append_params_to_uri(uri_relative, params) if uri_relative not in self._URIS_NOT_REQUIRING_ACTIVE_SESSION: if self._api_context.ensure_session_active(): from bunq.sdk.context import BunqContext BunqContext.update_api_context(self._api_context) all_headers = self._get_all_headers( method, uri_relative_with_params, request_bytes, custom_headers ) response = requests.request( method, self._get_uri_full(uri_relative_with_params), data=request_bytes, headers=all_headers, proxies={self._FIELD_PROXY_HTTPS: self._api_context.proxy_url}, ) self._assert_response_success(response) if self._api_context.installation_context is not None: security.validate_response( self._api_context.installation_context.public_key_server, response.status_code, response.content, response.headers ) return self._create_bunq_response_raw(response)
:type method: str :type uri_relative: str :type request_bytes: bytes :type params: dict[str, str] :type custom_headers: dict[str, str] :return: BunqResponseRaw
16,714
def learnObject(self, objectDescription, randomLocation=False, useNoise=False, noisyTrainingTime=1): self.reset() self.column.activateRandomLocation() locationsAreUnique = True if randomLocation or useNoise: numIters = noisyTrainingTime else: numIters = 1 for i in xrange(numIters): for iFeature, feature in enumerate(objectDescription["features"]): self._move(feature, randomLocation=randomLocation, useNoise=useNoise) featureSDR = self.features[feature["name"]] self._sense(featureSDR, learn=True, waitForSettle=False) locationRepresentation = self.column.getSensoryAssociatedLocationRepresentation() self.locationRepresentations[(objectDescription["name"], iFeature)].append(locationRepresentation) self.inputRepresentations[(objectDescription["name"], iFeature, feature["name"])] = ( self.column.L4.getWinnerCells()) locationTuple = tuple(locationRepresentation) locationsAreUnique = (locationsAreUnique and locationTuple not in self.representationSet) self.representationSet.add(tuple(locationRepresentation)) self.learnedObjects.append(objectDescription) return locationsAreUnique
Train the network to recognize the specified object. Move the sensor to one of its features and activate a random location representation in the location layer. Move the sensor over the object, updating the location representation through path integration. At each point on the object, form reciprocal connections between the representation of the location and the representation of the sensory input. @param objectDescription (dict) For example: {"name": "Object 1", "features": [{"top": 0, "left": 0, "width": 10, "height": 10, "name": "A"}, {"top": 0, "left": 10, "width": 10, "height": 10, "name": "B"}]} @return locationsAreUnique (bool) True if this object was assigned a unique set of locations. False if a location on this object has the same location representation as another location somewhere else.
16,715
def get_instance(self):
    try:
        return self._instance
    except AttributeError:
        self._instance = self._decorated()
        return self._instance
Returns the singleton instance. Upon its first call, it creates a new instance of the decorated class and calls its `__init__` method. On all subsequent calls, the already created instance is returned.
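This accessor is typical of a decorator-based singleton. A minimal self-contained sketch of such a decorator follows; the class names Singleton and Config are illustrative assumptions, not from the source:

class Singleton:
    """Decorator that gives the wrapped class a single, lazily created instance."""
    def __init__(self, decorated):
        self._decorated = decorated

    def get_instance(self):
        # Create the instance on first access, reuse it afterwards.
        try:
            return self._instance
        except AttributeError:
            self._instance = self._decorated()
            return self._instance

    def __call__(self):
        raise TypeError("Singletons must be accessed through get_instance().")


@Singleton
class Config:
    def __init__(self):
        self.values = {}


assert Config.get_instance() is Config.get_instance()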
16,716
def _dK_dR(self, R): return -self._ns * self._N / R**2 / self._sin_alpha
Return numpy array of dK/dR from K1 up to and including Kn.
16,717
def run(self, file_list): data = {: 0, : 0, : []} for filename in file_list: path, fname = os.path.split(filename) if os.path.splitext(filename)[1] != : return data
Runs pylint on the list of files and return a dictionary: {<filename>: [list of pylint errors], 'total': <int> - Total number of pylint messages, 'errors': <int> - Number of pylint errors, 'scores': (<filename>, score) - Individual score for each file.} :param file_list: :return:
16,718
def _interpolationFunctionFactory(self, spline_order=None, cval=None): import scipy.ndimage if spline_order is None: spline_order = self.interpolation_spline_order if cval is None: cval = self.interpolation_cval data = self.grid if cval is None: cval = data.min() try: _data = data.filled(cval) except AttributeError: _data = data coeffs = scipy.ndimage.spline_filter(_data, order=spline_order) x0 = self.origin dx = self.delta def _transform(cnew, c0, dc): return (numpy.atleast_1d(cnew) - c0) / dc def interpolatedF(*coordinates): _coordinates = numpy.array( [_transform(coordinates[i], x0[i], dx[i]) for i in range(len( coordinates))]) return scipy.ndimage.map_coordinates(coeffs, _coordinates, prefilter=False, mode=, cval=cval) return interpolatedF
Returns a function F(x,y,z) that interpolates any values on the grid. _interpolationFunctionFactory(self,spline_order=3,cval=None) --> F *cval* is set to :meth:`Grid.grid.min`. *cval* cannot be chosen too large or too small or NaN because otherwise the spline interpolation breaks down near that region and produces wild oscillations. .. Note:: Only correct for equally spaced values (i.e. regular edges with constant delta). .. SeeAlso:: http://www.scipy.org/Cookbook/Interpolation
16,719
def _group_and_publish_tasks_statistics(self, result): for i in result: executor_id = i[] i[] = executor_id[:executor_id.rfind()] i[][] = 1 r = {} for i in result: executor_id = i[] r[executor_id] = r.get(executor_id, {}) r[executor_id][] = i[] r[executor_id][] = r[executor_id].get(, {}) r[executor_id][] = self._sum_statistics( i[], r[executor_id][]) self._add_cpu_usage(r) self._add_cpu_percent(r) self._add_mem_percent(r) self._publish(r)
This function groups statistics of the same tasks by adding them. It also adds an 'instances_count' statistic to report how many instances are running on the server. Args: result: result of the Mesos query. List of dictionaries with 'executor_id' and 'framework_id' as strings and 'statistics' as a dictionary of labeled numbers.
16,720
def restore_geometry_on_layout_change(self, value): if value is not None: assert type(value) is bool, " attribute: type is not !".format( "restore_geometry_on_layout_change", value) self.__restore_geometry_on_layout_change = value
Setter for **self.__restore_geometry_on_layout_change** attribute. :param value: Attribute value. :type value: bool
16,721
def wet_bulb_from_db_rh(db_temp, rh, b_press=101325): es = 6.112 * math.e**((17.67 * db_temp) / (db_temp + 243.5)) e = (es * rh) / 100 t_w = 0 increse = 10.0 previoussign = 1 e_d = 1 while math.fabs(e_d) > 0.005: e_wg = 6.112 * (math.e**((17.67 * t_w) / (t_w + 243.5))) eg = e_wg - (b_press/100) * (db_temp - t_w) * 0.00066 * (1 + (0.00155 * t_w)) e_d = e - eg if e_d == 0: break else: if e_d < 0: cursign = -1 if cursign != previoussign: previoussign = cursign increse = increse / 10 else: increse = increse else: cursign = 1 if cursign != previoussign: previoussign = cursign increse = increse/10 else: increse = increse t_w = t_w + increse * previoussign return t_w
Wet Bulb Temperature (C) at db_temp (C), Relative Humidity rh (%), and Pressure b_press (Pa). Note: [1] J. Sullivan and L. D. Sanders. "Method for obtaining wet-bulb temperatures by modifying the psychrometric formula." Center for Experiment Design and Data Analysis. NOAA - National Oceanic and Atmospheric Administration. http://www.srh.noaa.gov/epz/?n=wxcalc_rh
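Restating the relation that the loop above solves (this is the code's own formula re-expressed, not an independent reference; temperatures in degrees C, pressure p in Pa as passed in, vapour pressures in hPa):

e_s = 6.112 * exp(17.67 * T_db / (T_db + 243.5))
e   = e_s * rh / 100
e_g(t_w) = 6.112 * exp(17.67 * t_w / (t_w + 243.5)) - (p / 100) * (T_db - t_w) * 0.00066 * (1 + 0.00155 * t_w)

The trial wet-bulb temperature t_w starts at 0 and is stepped up or down, with the step shrinking by a factor of 10 each time e - e_g changes sign, until |e - e_g| <= 0.005.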
16,722
def leaf_sections(h):
    leaves = []
    for section in h.allsec():
        sref = h.SectionRef(sec=section)
        if sref.nchild() < 0.9:
            leaves.append(section)
    return leaves
Returns a list of all sections that have no children.
16,723
def activate_user(self, user):
    if not user.active:
        user.active = True
        return True
    return False
Activates a specified user. Returns `True` if a change was made. :param user: The user to activate
16,724
def apply_palette(img, palette, options):
    if not options.quiet:
        print()  # the status-message literal was lost in extraction
    bg_color = palette[0]
    fg_mask = get_fg_mask(bg_color, img, options)
    orig_shape = img.shape
    pixels = img.reshape((-1, 3))
    fg_mask = fg_mask.flatten()
    num_pixels = pixels.shape[0]
    labels = np.zeros(num_pixels, dtype=np.uint8)
    labels[fg_mask], _ = vq(pixels[fg_mask], palette)
    return labels.reshape(orig_shape[:-1])
Apply the palette to the given image. The first step is to set all background pixels to the background color; then, nearest-neighbor matching is used to map each foreground color to the closest one in the palette.
16,725
def download_interim_for_gssha(main_directory, start_datetime, end_datetime, leftlon=-180, rightlon=180, toplat=90, bottomlat=-90, precip_only=False): from ecmwfapi import ECMWFDataServer server = ECMWFDataServer() try: mkdir(main_directory) except OSError: pass download_area = "{toplat}/{leftlon}/{bottomlat}/{rightlon}".format(toplat=toplat, leftlon=leftlon, bottomlat=bottomlat, rightlon=rightlon) download_datetime = start_datetime interim_request = { : "interim", : "oper", : "sfc", : "0.5/0.5", : download_area, : , } while download_datetime <= end_datetime: interim_request[] = download_datetime.strftime("%Y-%m-%d") if not precip_only: download_file = path.join(main_directory, "erai_gssha_{0}_an.nc".format(download_datetime.strftime("%Y%m%d"))) if not path.exists(download_file): interim_request[] = "an" interim_request[] = "2t/2d/sp/10u/10v/tcc" interim_request[] = "0" interim_request[] = "00/06/12/18" interim_request[] = download_file server.retrieve(interim_request) download_file = path.join(main_directory, "erai_gssha_{0}_1_fc.nc".format(download_datetime.strftime("%Y%m%d"))) if not path.exists(download_file): interim_request[] = "fc" interim_request[] = "2t/2d/sp/10u/10v/tcc" interim_request[] = "3" interim_request[] = "00/06/12/18" interim_request[] = download_file server.retrieve(interim_request) download_file = path.join(main_directory, "erai_gssha_{0}_fc.nc".format(download_datetime.strftime("%Y%m%d"))) if not path.exists(download_file): interim_request[] = "fc" interim_request[] = "tp/ssrd" interim_request[] = "3/6/9/12" interim_request[] = "00/12" interim_request[] = download_file server.retrieve(interim_request) tmp_download_file = download_file + with xr.open_dataset(download_file) as xd: diff_xd = xd.diff() xd.tp[1:4] = diff_xd.tp[:3] xd.tp[5:] = diff_xd.tp[4:] xd.ssrd[1:4] = diff_xd.ssrd[:3] xd.ssrd[5:] = diff_xd.ssrd[4:] xd.to_netcdf(tmp_download_file) remove(download_file) rename(tmp_download_file, download_file) download_file = path.join(main_directory, "erai_gssha_{0}_0_fc.nc".format(download_datetime.strftime("%Y%m%d"))) if download_datetime <= start_datetime and not path.exists(download_file): loc_download_date = (download_datetime-timedelta(1)).strftime("%Y-%m-%d") interim_request[] = "fc" interim_request[] = "tp/ssrd" interim_request[] = "9/12" interim_request[] = "12" interim_request[] = download_file interim_request[] = loc_download_date server.retrieve(interim_request) tmp_download_file = download_file + with xr.open_dataset(download_file) as xd: inc_xd = xd.diff() inc_xd.to_netcdf(tmp_download_file) remove(download_file) rename(tmp_download_file, download_file) download_datetime += timedelta(1)
Function to download ERA Interim data for GSSHA .. note:: https://software.ecmwf.int/wiki/display/WEBAPI/Access+ECMWF+Public+Datasets Args: main_directory(:obj:`str`): Location of the output for the forecast data. start_datetime(:obj:`str`): Datetime for download start. end_datetime(:obj:`str`): Datetime for download end. leftlon(Optional[:obj:`float`]): Left bound for longitude. Default is -180. rightlon(Optional[:obj:`float`]): Right bound for longitude. Default is 180. toplat(Optional[:obj:`float`]): Top bound for latitude. Default is 90. bottomlat(Optional[:obj:`float`]): Bottom bound for latitude. Default is -90. precip_only(Optional[bool]): If True, will only download precipitation. Example:: from gsshapy.grid.era_to_gssha import download_era_interim_for_gssha era_interim_folder = '/era_interim' leftlon = -95 rightlon = -75 toplat = 35 bottomlat = 30 download_era_interim_for_gssha(era_interim_folder, leftlon, rightlon, toplat, bottomlat)
16,726
def reporter(self): make_path(self.reportpath) logging.info(.format(self.analysistype)) header = data = with open(self.sixteens_report, ) as report: with open(os.path.join(self.reportpath, self.analysistype + ), ) as sequences: for sample in self.runmetadata.samples: sample[self.analysistype].sixteens_match = sample[self.analysistype].species = try: sample[self.analysistype].besthit = sorted(sample[self.analysistype].resultssnp.items(), key=operator.itemgetter(1))[0][0] for record in SeqIO.parse(sample[self.analysistype].baitfile, ): if sample[self.analysistype].besthit in record.id: sample[self.analysistype].sixteens_match = record.description.split()[0] sample[self.analysistype].species = \ sample[self.analysistype].sixteens_match.split()[-1].split()[1] data += sample.name + for name, identity in sample[self.analysistype].results.items(): if name == sample[self.analysistype].besthit: data += .format(name, identity, sample[self.analysistype].genus, sample[self.analysistype].avgdepth[name]) record = SeqRecord(Seq(sample[self.analysistype].sequences[name], IUPAC.unambiguous_dna), id=.format(sample.name, ), description=) SeqIO.write(record, sequences, ) except (AttributeError, IndexError): data += .format(sample.name) report.write(header) report.write(data)
Creates a report of the results
16,727
def getClassAllSubs(self, aURI): aURI = aURI try: qres = self.rdfgraph.query( % (aURI)) except: printDebug("... warning: the query failed (maybe missing SPARQL 1.1 support?)") qres = [] return list(qres)
Note: requires SPARQL 1.1. 2015-06-04: currently not used, inferred from above.
16,728
async def async_input(prompt):
    # end='' keeps the cursor on the prompt line (the literal was lost in extraction).
    print(prompt, end='', flush=True)
    return (await loop.run_in_executor(None, sys.stdin.readline)).rstrip()
Python's ``input()`` is blocking, which means the event loop we set above can't be running while we're blocking there. This method will let the loop run while we wait for input.
16,729
def loads(string):
    d = _loads(string)
    for k, v in d.items():
        FILTERS[dr.get_component(k) or k] = set(v)
Loads the filters dictionary given a string.
16,730
def get_location(conn, vm_): locations = conn.list_locations() vm_location = config.get_cloud_config_value(, vm_, __opts__) if not six.PY3: vm_location = vm_location.encode( , ) for img in locations: if isinstance(img.id, six.string_types) and not six.PY3: img_id = img.id.encode(, ) else: img_id = str(img.id) if isinstance(img.name, six.string_types) and not six.PY3: img_name = img.name.encode(, ) else: img_name = str(img.name) if vm_location and vm_location in (img_id, img_name): return img raise SaltCloudNotFound( {0}\.format( vm_location ) )
Return the location object to use
16,731
def find_one(self, id_, raw=True, recovery_name=True):
    url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
        self.key, id_)
    res = self.get(url)
    if raw:
        try:
            res = self.get_raw_values(res, recovery_name=recovery_name)
        except:
            pass
    else:
        try:
            res = self.get_html_values(res, recovery_name=recovery_name)
        except:
            pass
    return res
Find one record. Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve :param id_: record id_ :param using_name: if you are using field names in filter and sort_field, set using_name = True (the default); otherwise, False :param raw: Default True; set True if you want the data in raw format, otherwise HTML format :param recovery_name: Default True; set True if you want field names instead of field keys. Returns a single record.
16,732
def to_message(self, keywords=None, show_header=True): if keywords is None and self.layer is not None: keywords = self.read_keywords(self.layer) preferred_order = [ , , , , , , , , , , , , , , , , , , , , , , ] report = m.Message() if show_header: logo_element = m.Brand() report.add(logo_element) report.add(m.Heading(tr( ), **styles.BLUE_LEVEL_4_STYLE)) report.add(m.Text(tr( ))) table = m.Table(style_class=) for keyword in preferred_order: if keyword in keywords: value = keywords[keyword] row = self._keyword_to_row(keyword, value) keywords.pop(keyword) table.add(row) for keyword in keywords: value = keywords[keyword] row = self._keyword_to_row(keyword, value) table.add(row) if self.layer: keyword = tr() value = self.layer.crs().authid() row = self._keyword_to_row(keyword, value) table.add(row) keyword = tr() value = self.layer.publicSource() row = self._keyword_to_row(keyword, value, wrap_slash=True) table.add(row) report.add(table) return report
Format keywords as a message object. .. versionadded:: 3.2 .. versionchanged:: 3.3 - default keywords to None The message object can then be rendered to html, plain text etc. :param keywords: Keywords to be converted to a message. Optional. If not passed then we will attempt to get keywords from self.layer if it is not None. :type keywords: dict :param show_header: Flag indicating if InaSAFE logo etc. should be added above the keywords table. Default is True. :type show_header: bool :returns: A safe message object containing a table. :rtype: safe.messaging.Message
16,733
def _to_dict(self):
    _dict = {}
    # The key literals mirror the attribute names accessed on self (lost in extraction).
    if hasattr(self, 'classifier_id') and self.classifier_id is not None:
        _dict['classifier_id'] = self.classifier_id
    if hasattr(self, 'url') and self.url is not None:
        _dict['url'] = self.url
    if hasattr(self, 'collection') and self.collection is not None:
        _dict['collection'] = [x._to_dict() for x in self.collection]
    return _dict
Return a json dictionary representing this model.
16,734
def load_wmt_en_fr_dataset(path=): path = os.path.join(path, ) _WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/" _WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/" def gunzip_file(gz_path, new_path): logging.info("Unpacking %s to %s" % (gz_path, new_path)) with gzip.open(gz_path, "rb") as gz_file: with open(new_path, "wb") as new_file: for line in gz_file: new_file.write(line) def get_wmt_enfr_train_set(path): filename = "training-giga-fren.tar" maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True) train_path = os.path.join(path, "giga-fren.release2.fixed") gunzip_file(train_path + ".fr.gz", train_path + ".fr") gunzip_file(train_path + ".en.gz", train_path + ".en") return train_path def get_wmt_enfr_dev_set(path): filename = "dev-v2.tgz" dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False) dev_name = "newstest2013" dev_path = os.path.join(path, "newstest2013") if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")): logging.info("Extracting tgz file %s" % dev_file) with tarfile.open(dev_file, "r:gz") as dev_tar: fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr") en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en") fr_dev_file.name = dev_name + ".fr" en_dev_file.name = dev_name + ".en" dev_tar.extract(fr_dev_file, path) dev_tar.extract(en_dev_file, path) return dev_path logging.info("Load or Download WMT English-to-French translation > {}".format(path)) train_path = get_wmt_enfr_train_set(path) dev_path = get_wmt_enfr_dev_set(path) return train_path, dev_path
Load WMT'15 English-to-French translation dataset. It will download the data from the WMT'15 Website (10^9-French-English corpus), and the 2013 news test from the same site as development set. Returns the directories of training data and test data. Parameters ---------- path : str The path that the data is downloaded to, defaults is ``data/wmt_en_fr/``. References ---------- - Code modified from /tensorflow/models/rnn/translation/data_utils.py Notes ----- Usually, it will take a long time to download this dataset.
16,735
def envelope(self, header, body):
    env = Element("Envelope", ns=envns)
    env.addPrefix(Namespace.xsins[0], Namespace.xsins[1])
    env.append(header)
    env.append(body)
    return env
Build the B{<Envelope/>} for a SOAP outbound message. @param header: The SOAP message B{header}. @type header: L{Element} @param body: The SOAP message B{body}. @type body: L{Element} @return: The SOAP envelope containing the body and header. @rtype: L{Element}
16,736
def read_config(self, filename):
    if not os.path.exists(filename):
        raise Exception("Configuration file cannot be found: %s" % filename)
    # The encoding literal was lost in extraction; UTF-8 is assumed.
    with io.open(filename, encoding='utf-8') as stream:
        return yaml.safe_load(stream)
Returns data found in config file (as dict), or raises exception if file not found
16,737
def save_uca(self, rootpath, raw=False, as_int=False): self.save_array(self.uca, None, , rootpath, raw, as_int=as_int)
Saves the upstream contributing area to a file
16,738
def add(self, *args):
    i = 1
    row = []
    for button in args:
        row.append(button.to_dic())
        if i % self.row_width == 0:
            self.keyboard.append(row)
            row = []
        i += 1
    if len(row) > 0:
        self.keyboard.append(row)
This function adds strings to the keyboard, while not exceeding row_width. E.g. ReplyKeyboardMarkup#add("A", "B", "C") yields the json result {keyboard: [["A"], ["B"], ["C"]]} when row_width is set to 1. When row_width is set to 2, the following is the result of this function: {keyboard: [["A", "B"], ["C"]]} See https://core.telegram.org/bots/api#replykeyboardmarkup :param args: KeyboardButton to append to the keyboard
16,739
def addfield(self, pkt, s, i):
    if i is None:
        if self.length_of is not None:
            fld, fval = pkt.getfield_and_val(self.length_of)
            tmp = pkt.tls_session.frozen
            pkt.tls_session.frozen = True
            f = fld.i2len(pkt, fval)
            pkt.tls_session.frozen = tmp
            i = self.adjust(pkt, f)
            if i == 0:
                return s
    return s + struct.pack(self.fmt, i)
There is a hack with the _ExtensionsField.i2len. It works only because we expect _ExtensionsField.i2m to return a string of the same size (if not of the same value) upon successive calls (e.g. through i2len here, then i2m when directly building the _ExtensionsField). XXX A proper way to do this would be to keep the extensions built from the i2len call here, instead of rebuilding them later on.
16,740
async def track_event(event, state, service_name):
    redis = await aioredis.create_redis(
        (EVENT_TRACKING_HOST, 6379), loop=loop)
    now = datetime.utcnow()
    event_id = event.event_id
    tracking_data = json.dumps({
        "event_id": event_id,
        "timestamp": str(now),
        "state": state
    })
    await redis.rpush(service_name, tracking_data)
    redis.close()
    await redis.wait_closed()
Store state of events in memory :param event: Event object :param state: EventState object :param service_name: Name of service name
16,741
def quantile(y, k=4): w = 100. / k p = np.arange(w, 100 + w, w) if p[-1] > 100.0: p[-1] = 100.0 q = np.array([stats.scoreatpercentile(y, pct) for pct in p]) q = np.unique(q) k_q = len(q) if k_q < k: Warn(, UserWarning) Warn( % k_q, UserWarning) return q
Calculates the quantiles for an array Parameters ---------- y : array (n,1), values to classify k : int number of quantiles Returns ------- q : array (n,1), quantile values Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(1000) >>> mc.classifiers.quantile(x) array([249.75, 499.5 , 749.25, 999. ]) >>> mc.classifiers.quantile(x, k = 3) array([333., 666., 999.]) Note that if there are enough ties that the quantile values repeat, we collapse to pseudo quantiles in which case the number of classes will be less than k >>> x = [1.0] * 100 >>> x.extend([3.0] * 40) >>> len(x) 140 >>> y = np.array(x) >>> mc.classifiers.quantile(y) array([1., 3.])
16,742
def set_ccc(ctx, management_key, pin):
    # The key into ctx.obj was lost in extraction; 'controller' is assumed.
    controller = ctx.obj['controller']
    _ensure_authenticated(ctx, controller, pin, management_key)
    controller.update_ccc()
Generate and set a CCC on the YubiKey.
16,743
def working_yesterday(self, date_from=None, date_format=None): return self.delta(days=-1, date_from=date_from, date_format=date_format, days_range=[1, 2, 3, 4, 5, 6])
Returns yesterday's date from now, or from a supplied date, counting working days only. Thus Monday becomes Saturday and Saturday becomes Friday. :param date_from: reference date :return: datetime
16,744
def __RetrieveContent(host, port, adapter, version, path, keyFile, certFile, thumbprint, sslContext, connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC): if adapter != "SOAP": raise ValueError(adapter) stub = SoapStubAdapter(host, port, version=version, path=path, certKeyFile=keyFile, certFile=certFile, thumbprint=thumbprint, sslContext=sslContext, connectionPoolTimeout=connectionPoolTimeout) si = vim.ServiceInstance("ServiceInstance", stub) content = None try: content = si.RetrieveContent() except vmodl.MethodFault: raise except Exception as e: (type, value, traceback) = sys.exc_info() if traceback: fault = vim.fault.HostConnectFault(msg=str(e)) reraise(vim.fault.HostConnectFault, fault, traceback) else: raise vim.fault.HostConnectFault(msg=str(e)) return content, si, stub
Retrieve service instance for connection. @param host: Which host to connect to. @type host: string @param port: Port @type port: int @param adapter: Adapter @type adapter: string @param version: Version @type version: string @param path: Path @type path: string @param keyFile: ssl key file path @type keyFile: string @param certFile: ssl cert file path @type certFile: string @param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never closing the connections @type connectionPoolTimeout: int
16,745
def run(self, root): math_tag_class = self.pelican_mathjax_extension.getConfig() for parent in root: div_math = [] children = list(parent) for div in parent.findall(): if div.get() == math_tag_class: div_math.append(children.index(div)) if not div_math: continue insert_idx = list(root).index(parent) self.correct_html(root, children, div_math, insert_idx, parent.text) root.remove(parent) return root
Searches for <div class="math"> that are children in <p> tags and corrects the invalid HTML that results
16,746
async def invoke(self, *args, **kwargs):
    try:
        command = args[0]
    except IndexError:
        # The original error-message literal was lost in extraction.
        raise TypeError() from None
    arguments = []
    if command.cog is not None:
        arguments.append(command.cog)
    arguments.append(self)
    arguments.extend(args[1:])
    ret = await command.callback(*arguments, **kwargs)
    return ret
|coro| Calls a command with the arguments given. This is useful if you want to just call the callback that a :class:`.Command` holds internally. Note ------ You do not pass in the context as it is done for you. Warning --------- The first parameter passed **must** be the command being invoked. Parameters ----------- command: :class:`.Command` A command or subclass of a command that is going to be called. \*args The arguments to use. \*\*kwargs The keyword arguments to use.
16,747
def show_data(self, item): child, cookie = self.mainview_tree.GetFirstChild(item) child_list = [] while child.IsOk(): child_list.append(child) child, cookie = self.mainview_tree.GetNextChild(item, cookie) lc = self.nodeview_lc lc.DeleteAllItems() for i, child in enumerate(child_list): text = self.mainview_tree.GetItemText(child) try: k, v = [s.strip() for s in text.split()] except ValueError: k, v = text, idx = lc.InsertItem(MAXNROW, v) lc.SetItem(idx, 1, k)
Show data key-value pairs in the ListCtrl for the given tree item.
16,748
def fit_model(ts, sc=None):
    assert sc != None, "Missing SparkContext"
    jvm = sc._jvm
    jmodel = jvm.com.cloudera.sparkts.models.GARCH.fitModel(
        _py2java(sc, Vectors.dense(ts)))
    return GARCHModel(jmodel=jmodel, sc=sc)
Fits a GARCH(1, 1) model to the given time series. Parameters ---------- ts: the time series to which we want to fit a GARCH model as a Numpy array Returns a GARCH model
16,749
def update_vlan(self, name, vid, vni): cmd = % (vid, vni) return self.configure_interface(name, cmd)
Adds a new vlan to vni mapping for the interface EosVersion: 4.13.7M Args: vlan (str, int): The vlan id to map to the vni vni (str, int): The vni value to use Returns: True if the command completes successfully
16,750
def plot(cg): from androguard.core.analysis.analysis import ExternalMethod import matplotlib.pyplot as plt import networkx as nx pos = nx.spring_layout(cg) internal = [] external = [] for n in cg.node: if isinstance(n, ExternalMethod): external.append(n) else: internal.append(n) nx.draw_networkx_nodes(cg, pos=pos, node_color=, nodelist=internal) nx.draw_networkx_nodes(cg, pos=pos, node_color=, nodelist=external) nx.draw_networkx_edges(cg, pos, arrow=True) nx.draw_networkx_labels(cg, pos=pos, labels={x: "{} {}".format(x.get_class_name(), x.get_name()) for x in cg.edge}) plt.draw() plt.show()
Plot the call graph using matplotlib. For larger graphs, this should not be used, as it is very slow and you will probably not be able to see anything on it. :param cg: A networkx call graph to plot
16,751
def _on_timeout(self, _attempts=0): if self._connection is None and _attempts < 3: self._timer = self.session.cluster.connection_class.create_timer( 0.01, partial(self._on_timeout, _attempts=_attempts + 1) ) return if self._connection is not None: try: self._connection._requests.pop(self._req_id) except KeyError: return pool = self.session._pools.get(self._current_host) if pool and not pool.is_shutdown: with self._connection.lock: self._connection.request_ids.append(self._req_id) pool.return_connection(self._connection) errors = self._errors if not errors: if self.is_schema_agreed: key = str(self._current_host.endpoint) if self._current_host else errors = {key: "Client request timeout. See Session.execute[_async](timeout)"} else: connection = self.session.cluster.control_connection._connection host = str(connection.endpoint) if connection else errors = {host: "Request timed out while waiting for schema agreement. See Session.execute[_async](timeout) and Cluster.max_schema_agreement_wait."} self._set_final_exception(OperationTimedOut(errors, self._current_host))
Called when the request associated with this ResponseFuture times out. This function may reschedule itself. The ``_attempts`` parameter tracks the number of times this has happened. This parameter should only be set in those cases, where ``_on_timeout`` reschedules itself.
16,752
def _get_paths(): import os base_path = os.path.dirname(os.path.abspath(__file__)) test_data_dir = os.path.join(base_path, , , ) test_data_file = os.path.join(test_data_dir, ) return test_data_dir, test_data_file
Generate paths to test data. Done in a function to protect namespace a bit.
16,753
def disqus_sso_script(context):
    settings = context["settings"]
    public_key = getattr(settings, "COMMENTS_DISQUS_API_PUBLIC_KEY", "")
    secret_key = getattr(settings, "COMMENTS_DISQUS_API_SECRET_KEY", "")
    user = context["request"].user
    if public_key and secret_key and user.is_authenticated():
        context["public_key"] = public_key
        context["sso_data"] = _get_disqus_sso(user, public_key, secret_key)
    return context
Provides a generic context variable which adds single-sign-on support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified.
16,754
def resetPassword(self, email=True):
    url = self.root + "/reset"
    params = {
        "f": "json",
        "email": email
    }
    return self._post(url=url,
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
Resets a user's password for an account. The password will be randomly generated and emailed by the system. Input: email - boolean indicating that a password email will be sent to the user's profile email address. The default is True.
16,755
def file_needs_update(target_file, source_file):
    if not os.path.isfile(target_file) or get_md5_file_hash(target_file) != get_md5_file_hash(source_file):
        return True
    return False
Checks whether target_file does not exist or differs from source_file. :param target_file: File target for a copy action :param source_file: File to be copied :return: True if target_file does not exist or differs from source_file, else False :rtype: bool
16,756
def build(self, pre=None, shortest=False):
    global REF_LEVEL
    REF_LEVEL += 1
    try:
        if pre is None:
            pre = []
        definition = self.fuzzer.get_ref(self.cat, self.refname)
        res = utils.val(
            definition,
            pre,
            shortest=(shortest or REF_LEVEL >= self.max_recursion)
        )
        return res
    finally:
        REF_LEVEL -= 1
Build the ``Ref`` instance by fetching the rule from the GramFuzzer instance and building it :param list pre: The prerequisites list :param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.
16,757
def to_dict(self, fields=_all_fields, labels=None): fields = set(fields) diff = fields.difference(_all_fields) if isinstance(labels, Sequence): labels = _map_labels(self, labels) elif labels is None: labels = {} if diff: raise ValueError( .format(.join(diff)) ) return _to_dict(self, fields, labels)
Encode the node as a dictionary suitable for JSON serialization. Args: fields: if given, this is a whitelist of fields to include on nodes (`daughters` and `form` are always shown) labels: optional label annotations to embed in the derivation dict; the value is a list of lists matching the structure of the derivation (e.g., `["S" ["NP" ["NNS" ["Dogs"]]] ["VP" ["VBZ" ["bark"]]]]`) Returns: dict: the dictionary representation of the structure
16,758
def split_unquoted_newlines(stmt):
    text = text_type(stmt)
    lines = SPLIT_REGEX.split(text)
    # The empty-string literals were lost in extraction; '' is assumed for the
    # initial element and for each fresh line started on an unquoted newline.
    outputlines = ['']
    for line in lines:
        if not line:
            continue
        elif LINE_MATCH.match(line):
            outputlines.append('')
        else:
            outputlines[-1] += line
    return outputlines
Split a string on all unquoted newlines. Unlike str.splitlines(), this will ignore CR/LF/CR+LF if the requisite character is inside of a string.
16,759
def load_backends(self): for name, backend_settings in settings.storage.iteritems(): backend_path = backend_settings[] backend_module, backend_cls = backend_path.rsplit(, 1) backend_module = import_module(backend_module) backend_constructor = getattr(backend_module, backend_cls) self.backends[name] = backend_constructor(name, self.namespaces, **backend_settings)
Loads all the backends setup in settings.py.
16,760
def pattern_to_regex(cls, pattern): if isinstance(pattern, unicode): return_type = unicode elif isinstance(pattern, bytes): return_type = bytes pattern = pattern.decode(_BYTES_ENCODING) else: raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern)) pattern = pattern.strip() if pattern.startswith(): regex = None include = None elif pattern == : regex = None include = None elif pattern: if pattern.startswith(): include = False pattern = pattern[1:] else: include = True if pattern.startswith(): pattern = pattern[1:] pattern_segs = pattern.split() if not pattern_segs[0]: del pattern_segs[0] elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]): if pattern_segs[0] != : pattern_segs.insert(0, ) else: pass if not pattern_segs[-1] and len(pattern_segs) > 1: pattern_segs[-1] = output = [] need_slash = False end = len(pattern_segs) - 1 for i, seg in enumerate(pattern_segs): if seg == : if i == 0 and i == end: output.append() elif i == 0: output.append() need_slash = False elif i == end: output.append() else: output.append() need_slash = True elif seg == : if need_slash: output.append() output.append() need_slash = True else: if need_slash: output.append() output.append(cls._translate_segment_glob(seg)) if i == end and include is True: output.append() need_slash = True output.append() regex = .join(output) else: regex = None include = None if regex is not None and return_type is bytes: regex = regex.encode(_BYTES_ENCODING) return regex, include
Convert the pattern into a regular expression. *pattern* (:class:`unicode` or :class:`bytes`) is the pattern to convert into a regular expression. Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`, or :data:`None`), and whether matched files should be included (:data:`True`), excluded (:data:`False`), or if it is a null-operation (:data:`None`).
16,761
def get_list(self, id, name=None): return self.create_list(dict(id=id, name=name))
Get a list Returns: List: The list with the given `id`
16,762
def preprocessing_declaration(job, config): if config.preprocessing: job.fileStore.logToMaster( + config.uuid) disk = if config.ci_test else mem = if config.ci_test else processed_normal = job.wrapJobFn(run_gatk_preprocessing, config.normal_bam, config.normal_bai, config.reference, config.dict, config.fai, config.phase, config.mills, config.dbsnp, mem, cores=1, memory=mem, disk=disk) processed_tumor = job.wrapJobFn(run_gatk_preprocessing, config.tumor_bam, config.tumor_bai, config.reference, config.dict, config.fai, config.phase, config.mills, config.dbsnp, mem, cores=1, memory=mem, disk=disk) static_workflow = job.wrapJobFn(static_workflow_declaration, config, processed_normal.rv(0), processed_normal.rv(1), processed_tumor.rv(0), processed_tumor.rv(1)) job.addChild(processed_normal) job.addChild(processed_tumor) job.addFollowOn(static_workflow) else: job.addFollowOnJobFn(static_workflow_declaration, config, config.normal_bam, config.normal_bai, config.tumor_bam, config.tumor_bai)
Declare jobs related to preprocessing :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs
16,763
def add_transformers(line): assert FROM_EXPERIMENTAL.match(line) line = FROM_EXPERIMENTAL.sub(, line) line = line.split(" for trans in line.replace(, ).split(): import_transformer(trans)
Extracts the transformer names from a line of code of the form 'from __experimental__ import transformer1 [,...]' and adds them to the globally known dict.
16,764
async def send(self, message: Union[str, bytes],
               binary: bool=False,
               compress: Optional[int]=None) -> None:
    if isinstance(message, str):
        message = message.encode()
    if binary:
        await self._send_frame(message, WSMsgType.BINARY, compress)
    else:
        await self._send_frame(message, WSMsgType.TEXT, compress)
Send a frame over the websocket with message as its payload.
16,765
def initialize(self, training_info, model, environment, device):
    if self.trust_region:
        self.average_model = self.model_factory.instantiate(
            action_space=environment.action_space).to(device)
        self.average_model.load_state_dict(model.state_dict())
Initialize policy gradient from reinforcer settings
16,766
def _prepare_sample(data, run_folder):
    want = set(["description", "files", "genome_build", "name", "analysis", "upload", "algorithm"])
    out = {}
    for k, v in data.items():
        if k in want:
            out[k] = _relative_paths(v, run_folder)
    if "algorithm" not in out:
        analysis, algorithm = _select_default_algorithm(out.get("analysis"))
        out["algorithm"] = algorithm
        out["analysis"] = analysis
    description = "%s-%s" % (out["name"], clean_name(out["description"]))
    out["name"] = [out["name"], description]
    out["description"] = description
    return out
Extract passed keywords from input LIMS information.
16,767
def check_privatenet(self): rpc_settings.setup(self.RPC_LIST) client = RPCClient() try: version = client.get_version() except NEORPCException: raise PrivnetConnectionError("Error: private network container doesn%srm -rf %s*'." % self.chain_leveldb_path ) else: if not os.path.isdir(self.chain_leveldb_path): os.mkdir(self.chain_leveldb_path) with open(neopy_chain_meta_filename, "w") as f: f.write(nonce_container)
Check if privatenet is running, and if container is same as the current Chains/privnet database. Raises: PrivnetConnectionError: if the private net couldn't be reached or the nonce does not match
16,768
def pilot_PLL(xr,fq,fs,loop_type,Bn,zeta): T = 1/float(fs) Kv = 1.0 Norder = 5 b_lp,a_lp = signal.butter(Norder,2*(fq/2.)/float(fs)) fstate = np.zeros(Norder) Kv = 2*np.pi*Kv if loop_type == 1: fn = Bn Kt = 2*np.pi*fn elif loop_type == 2: fn = 1/(2*np.pi)*2*Bn/(zeta + 1/(4*zeta)) Kt = 4*np.pi*zeta*fn a = np.pi*fn/zeta else: print() filt_in_last = 0 filt_out_last = 0 vco_in_last = 0 vco_out = 0 vco_out_last = 0 n = np.arange(0,len(xr)) theta = np.zeros(len(xr)) ev = np.zeros(len(xr)) phi_error = np.zeros(len(xr)) for kk in range(len(n)): phi_error[kk] = 2*xr[kk]*np.sin(vco_out) phi_error[kk],fstate = signal.lfilter(b_lp,a_lp,np.array([phi_error[kk]]),zi=fstate) pd_out = phi_error[kk] gain_out = Kt/Kv*pd_out if loop_type == 2: filt_in = a*gain_out filt_out = filt_out_last + T/2.*(filt_in + filt_in_last) filt_in_last = filt_in filt_out_last = filt_out filt_out = filt_out + gain_out else: filt_out = gain_out vco_in = filt_out + fq/(Kv/(2*np.pi)) vco_out = vco_out_last + T/2.*(vco_in + vco_in_last) vco_in_last = vco_in vco_out_last = vco_out vco_out = Kv*vco_out ev[kk] = filt_out theta[kk] = np.mod(vco_out,2*np.pi); return theta,phi_error
theta, phi_error = pilot_PLL(xr,fq,fs,loop_type,Bn,zeta) Mark Wickert, April 2014
16,769
def create_context_menu(self, event, shape): routes = self.df_routes.loc[self.df_routes.electrode_i == shape, ].astype(int).unique().tolist() def _connect_callback(menu_item, command_signal, group, command, data): callback_called = threading.Event() def _callback(signal, widget, *args): if callback_called.is_set(): return callback_called.set() _L().debug(, signal, group, command, data) gtk.idle_add(self.emit, command_signal, group, command, data) menu_item.connect(, ft.partial(_callback, )) menu_item.connect(, ft.partial(_callback, )) if group is not None: menu_item.set_tooltip_text(group) menu = gtk.Menu() if self.global_commands: data = {: event.copy()} command_signal = for group, commands in self.global_commands.iteritems(): for command, title in commands.iteritems(): menu_item_j = gtk.MenuItem(title) menu.append(menu_item_j) _connect_callback(menu_item_j, command_signal, group, command, data) if self.electrode_commands: separator = gtk.SeparatorMenuItem() menu.append(separator) menu_e = gtk.Menu() menu_head_e = gtk.MenuItem() menu_head_e.set_submenu(menu_e) menu_head_e.set_use_underline(True) menu.append(menu_head_e) command_signal = data = {: shape, : event.copy()} for group, commands in self.electrode_commands.iteritems(): for command, title in commands.iteritems(): menu_item_j = gtk.MenuItem(title) menu_e.append(menu_item_j) _connect_callback(menu_item_j, command_signal, group, command, data) if routes and self.route_commands: separator = gtk.SeparatorMenuItem() menu.append(separator) menu_r = gtk.Menu() menu_head_r = gtk.MenuItem() menu_head_r.set_submenu(menu_r) menu_head_r.set_use_underline(True) menu.append(menu_head_r) command_signal = data = {: routes, : event.copy()} for group, commands in self.route_commands.iteritems(): for command, title in commands.iteritems(): menu_item_j = gtk.MenuItem(title) menu_r.append(menu_item_j) _connect_callback(menu_item_j, command_signal, group, command, data) menu.show_all() return menu
Parameters ---------- event : gtk.gdk.Event GTK mouse click event. shape : str Electrode shape identifier (e.g., `"electrode028"`). Returns ------- gtk.Menu Context menu. .. versionchanged:: 0.13 - Deprecate hard-coded commands (e.g., clear electrodes, clear routes). - Add anonymous global commands section at head of menu (i.e., commands not specific to an electrode or route). - Add "Electrode" and "Route(s)" sub-menus.
16,770
def delete_fastqs(job, patient_dict): for key in patient_dict.keys(): if not in key: continue job.fileStore.logToMaster( % (patient_dict[], key) + ) job.fileStore.deleteGlobalFile(patient_dict[key]) return None
Delete the fastqs from the job Store once their purpose has been achieved (i.e. after all mapping steps) :param dict patient_dict: Dict of list of input fastqs
16,771
async def update_template_context(self, context: dict) -> None:
    processors = self.template_context_processors[None]
    if has_request_context():
        blueprint = _request_ctx_stack.top.request.blueprint
        if blueprint is not None and blueprint in self.template_context_processors:
            processors = chain(processors, self.template_context_processors[blueprint])
    extra_context: dict = {}
    for processor in processors:
        extra_context.update(await processor())
    original = context.copy()
    context.update(extra_context)
    context.update(original)
Update the provided template context. This adds additional context from the various template context processors. Arguments: context: The context to update (mutate).
16,772
def tangent_approx(f: SYM, x: SYM, a: SYM = None,
                   assert_linear: bool = False) -> Dict[str, SYM]:
    if a is None:
        a = ca.DM.zeros(x.numel(), 1)
    f_a = ca.substitute(f, x, a)  # f(a)
    J = ca.jacobian(f, x)
    if assert_linear and ca.depends_on(J, x):
        raise AssertionError()
    # Solve J*x = -f(a) instead of forming the inverse explicitly.
    return ca.solve(J, -f_a)
Create a tangent approximation of a non-linear function f(x) about point a using a block lower triangular solver 0 = f(x) = f(a) + J*x # taylor series about a (if f(x) linear in x, then globally valid) J*x = -f(a) # solve for x x = -J^{-1}f(a) # but inverse is slow, so we use solve where J = df/dx
16,773
def parse(self): url = self.config.get() self.cnml = CNMLParser(url) self.parsed_data = self.cnml.getNodes()
parse data
16,774
def get_qtls_from_mapqtl_data(matrix, threshold, inputfile): trait_name = inputfile.split(, 1)[1].split()[0] qtls = [] qtl = None for entry in matrix[1:]: if qtl is None: qtl = entry if qtl[1] != entry[1]: if float(qtl[4]) > float(threshold): qtl[0] = trait_name qtls.append(qtl) qtl = entry if entry[4] == : entry[4] = 0 if qtl[4] == : qtl[4] = 0 if float(entry[4]) > float(qtl[4]): qtl = entry if float(qtl[4]) > float(threshold): qtl[0] = trait_name if qtl not in qtls: qtls.append(qtl) return qtls
Extract the QTLs found by MapQTL by reading its file. This assumes that there is only one QTL per linkage group. :arg matrix, the MapQTL file read in memory :arg threshold, threshold used to determine if a given LOD value reflects the presence of a QTL. :arg inputfile, name of the input file in which the QTLs have been found
16,775
def get_importable_modules(folder):
    for fname in os.listdir(folder):
        # The literals follow from the docstring: '.py' modules that do not
        # start with an underscore.
        if fname.endswith('.py') and not fname.startswith('_'):
            yield fname[:-3]
Find all module files in the given folder that end with '.py' and don't start with an underscore. @return module names @rtype: iterator of string
16,776
def _cleanup_label(label): conjunctions = [, , , , , ] little_preps = [ , , , , , , , , , ] articles = [, , ] lbl = label.split(r)[0] fixedwords = [] i = 0 for wrd in lbl.split(): i += 1 if i > 1 and re.match(romanNumeralPattern, wrd): n = fromRoman(wrd) if 0 < n < 100: suffix = wrd.replace(toRoman(n), , 1) fixed = .join((str(n), suffix)) wrd = fixed wrd = wrd.title() if wrd.lower() in (conjunctions+little_preps+articles) and i != 1: wrd = wrd.lower() fixedwords.append(wrd) lbl = .join(fixedwords) return lbl
Reformat the ALL CAPS OMIM labels to something more pleasant to read. This will: 1. remove the abbreviation suffixes 2. convert the roman numerals to integer numbers 3. make the text title case, except for supplied conjunctions/prepositions/articles :param label: :return:
16,777
def prefix_dict_keys(d: Dict[str, Any], prefix: str) -> Dict[str, Any]:
    result = {}
    for k, v in d.items():
        result[prefix + k] = v
    return result
Returns a dictionary that's a copy of ``d`` but with ``prefix`` prepended to its keys.
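A quick doctest-style illustration of the behaviour (key order follows the input dict):

>>> prefix_dict_keys({'a': 1, 'b': 2}, 'x_')
{'x_a': 1, 'x_b': 2}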
16,778
def delete(self):
    self.room.check_owner()
    self.conn.make_call("deleteFiles", [self.fid])
Remove this file
16,779
def get(self, request, *args, **kwargs): response = super(EntryProtectionMixin, self).get( request, *args, **kwargs) if self.object.login_required and not request.user.is_authenticated: return self.login() if (self.object.password and self.object.password != self.request.session.get(self.session_key % self.object.pk)): return self.password() return response
Do the login and password protection.
16,780
def makeNetwork(self):
    # NOTE: assumes networkx is imported as nx; the original import alias is not shown
    if "weight" in self.data_friendships.keys():
        self.G = G = nx.DiGraph()
    else:
        self.G = G = nx.Graph()
    F = self.data_friends
    for friendn in range(self.n_friends):
        if "posts" in F.keys():
            G.add_node(F["name"][friendn], label=F["label"][friendn], posts=F["posts"][friendn])
        elif "agerank" in F.keys():
            G.add_node(F["name"][friendn], label=F["label"][friendn], gender=F["sex"][friendn], locale=F["locale"][friendn], agerank=F["agerank"][friendn])
        else:
            G.add_node(F["name"][friendn], label=F["label"][friendn], gender=F["sex"][friendn], locale=F["locale"][friendn])
    F = self.data_friendships
    for friendshipn in range(self.n_friendships):
        if "weight" in F.keys():
            G.add_edge(F["node1"][friendshipn], F["node2"][friendshipn], weight=F["weight"][friendshipn])
        else:
            G.add_edge(F["node1"][friendshipn], F["node2"][friendshipn])
Makes graph object from .gdf loaded data
16,781
def get_or_create_head(root): head = _create_cssselector("head")(root) if not head: head = etree.Element("head") body = _create_cssselector("body")(root)[0] body.getparent().insert(0, head) return head else: return head[0]
Ensures that `root` contains a <head> element and returns it.
16,782
def search(self, text, lookup=None): return self.query().search(text, lookup=lookup)
Returns a new :class:`Query` for :attr:`Manager.model` with a full text search value.
16,783
def revoke_token(self, token, headers=None, **kwargs):
    self._check_configuration("site", "revoke_uri")
    url = "%s%s" % (self.site, quote(self.revoke_url))

    # the revocation endpoint expects the token in a 'token' field (per RFC 7009);
    # the original key literal was lost in extraction
    data = {'token': token}
    data.update(kwargs)

    return self._make_request(url, data=data, headers=headers)
Revoke an access token
16,784
def __call_api(self, path, params=None, api_url=FORECAST_URL):
    if not params:
        params = dict()

    # NOTE: the API key parameter name was lost in extraction; DataPoint expects 'key'
    payload = {'key': self.api_key}
    payload.update(params)

    url = "%s/%s" % (api_url, path)

    sess = self.__retry_session()
    req = sess.get(url, params=payload, timeout=1)

    try:
        data = req.json()
    except ValueError:
        raise APIException("DataPoint has not returned any data, this could be due to an incorrect API key")

    self.call_response = data
    if req.status_code != 200:
        msg = [data[m] for m in ("message", "error_message", "status")
               if m in data][0]
        raise Exception(msg)
    return data
Call the datapoint api using the requests module
16,785
def plot_3d_dist(Z, X, Y, N=1000, AxisOffset=0, Angle=-40, LowLim=None, HighLim=None, show_fig=True): angle = Angle fig = _plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, projection=) y = Z[0:N] x = X[0:N] z = Y[0:N] ax.scatter(x, y, z, alpha=0.3) xlim = ax.get_xlim() ylim = ax.get_ylim() zlim = ax.get_zlim() if LowLim != None: lowLim = LowLim - AxisOffset else: lowLim = min([xlim[0], ylim[0], zlim[0]]) - AxisOffset if HighLim != None: highLim = HighLim + AxisOffset else: highLim = max([xlim[1], ylim[1], zlim[1]]) + AxisOffset ax.set_xlim([lowLim, highLim]) ax.set_ylim([lowLim, highLim]) ax.set_zlim([lowLim, highLim]) ax.set_xlabel("x") ax.set_ylabel("z") ax.set_zlabel("y") ax.view_init(30, angle) h, yedges, zedges = _np.histogram2d(y, z, bins=50) h = h.transpose() normalized_map = _plt.cm.Blues(h/h.max()) yy, zz = _np.meshgrid(yedges, zedges) xpos = lowLim xflat = _np.full_like(yy, xpos) p = ax.plot_surface(xflat, yy, zz, facecolors=normalized_map, rstride=1, cstride=1, shade=False) h, xedges, zedges = _np.histogram2d(x, z, bins=50) h = h.transpose() normalized_map = _plt.cm.Blues(h/h.max()) xx, zz = _np.meshgrid(xedges, zedges) ypos = highLim yflat = _np.full_like(xx, ypos) p = ax.plot_surface(xx, yflat, zz, facecolors=normalized_map, rstride=1, cstride=1, shade=False) h, yedges, xedges = _np.histogram2d(y, x, bins=50) h = h.transpose() normalized_map = _plt.cm.Blues(h/h.max()) yy, xx = _np.meshgrid(yedges, xedges) zpos = lowLim zflat = _np.full_like(yy, zpos) p = ax.plot_surface(xx, yy, zflat, facecolors=normalized_map, rstride=1, cstride=1, shade=False) if show_fig == True: _plt.show() return fig, ax
Plots Z, X and Y as a 3d scatter plot with heatmaps of each axis pair. Parameters ---------- Z : ndarray Array of Z positions with time X : ndarray Array of X positions with time Y : ndarray Array of Y positions with time N : optional, int Number of time points to plot (Defaults to 1000) AxisOffset : optional, double Offset to add to each axis from the data - used to get a better view of the heat maps (Defaults to 0) LowLim : optional, double Lower limit of x, y and z axis HighLim : optional, double Upper limit of x, y and z axis show_fig : optional, bool Whether to show the produced figure before returning Returns ------- fig : matplotlib.figure.Figure object The figure object created ax : matplotlib.axes.Axes object The subplot object created
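A usage sketch with synthetic data standing in for real trajectories; the random-number setup is illustrative only:

import numpy as np

rng = np.random.default_rng(0)
Z = rng.normal(size=5000)
X = rng.normal(size=5000)
Y = rng.normal(size=5000)

fig, ax = plot_3d_dist(Z, X, Y, N=1000, AxisOffset=0.5, show_fig=True)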
16,786
def trade_signals_handler(self, signals):
    alloc = {}
    # the 'buy' and 'sell' keys follow the docstring; the original literals were lost
    if signals['buy'] or signals['sell']:
        try:
            alloc, e_ret, e_risk = self.optimize(
                self.date,
                signals['buy'],
                signals['sell'],
                self._optimizer_parameters)
        except Exception as error:
            raise PortfolioOptimizationFailed(
                reason=error, date=self.date, data=signals)

    return _remove_useless_orders(alloc)
Process buy and sell signals from the simulation
16,787
def _kwarg(self, kwargs, kwname, default=None):
    return kwargs.get(kwname) or \
        self.settings.config.get(self.name, {}).get(kwname) or \
        self.settings.config.get('__defaults__', {}).get(kwname) or \
        default
Resolves keyword arguments from constructor or :doc:`config`.

.. note::

    The keyword arguments take this order of precedence:

    1. Arguments passed to the constructor through :func:`authomatic.login`.
    2. Provider specific arguments from :doc:`config`.
    3. Arguments from :doc:`config` set in the ``__defaults__`` key.
    4. The value of the :data:`default` argument.

:param dict kwargs: Keyword arguments dictionary.
:param str kwname: Name of the desired keyword argument.
:param default: Value returned when the keyword argument is found nowhere else.
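A sketch of the precedence rules using a hypothetical provider name and config layout (the provider and keyword names are illustrative only):

config = {
    'example_provider': {'scope': ['profile']},   # 2. provider-specific value
    '__defaults__': {'scope': ['email']},         # 3. global default
}

# 1. an explicit keyword argument passed to authomatic.login(..., scope=['openid'])
#    takes precedence over both entries above;
# 4. the `default` argument is used only when the keyword is found nowhere else.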
16,788
def to_csv(weekmatrices, filename, digits=5):
    # NOTE: the file mode and line terminator literals were lost in extraction;
    # 'w' and '\n' are assumptions.
    with open(filename, 'w') as f:
        w = csv.writer(f, lineterminator='\n')
        # NOTE: the original five column names were lost in extraction; the names
        # below are placeholders only, not the author's header.
        w.writerow(['col_1', 'col_2', 'col_3', 'col_4', 'col_5'])

        def make_repr(item):
            if item is None:
                return None
            elif isinstance(item, float):
                return repr(round(item, digits))
            else:
                return str(item)

        for row in weekmatrices:
            w.writerow([make_repr(item) for item in row])
Exports a list of week-matrices to a specified filename in the CSV format. Parameters ---------- weekmatrices : list The week-matrices to export. filename : string Path for the exported CSV file. digits : int, optional Number of decimal places kept for float values (default 5).
16,789
def plot_clock_diagrams(self, colormap="summer"): cmap = plt.get_cmap(colormap) for res in self.topology_data.dict_of_plotted_res: colors = [cmap(i) for i in numpy.linspace(0, 1, len(self.topology_data.dict_of_plotted_res[res]))] traj_colors_ = {traj:colors[i] for i,traj in enumerate(self.topology_data.dict_of_plotted_res[res])} plt.figure(figsize=(2.25, 2.25)) ring_number=[sum(1 for x in v if x) for k,v in self.topology_data.dict_of_plotted_res.items()][0] self.topology_data.ring_number = ring_number rings=[] if ring_number<2: width = 0.3 else: width = 0.2 for ring in range(0,ring_number): ring,_=plt.pie([self.topology_data.dict_of_plotted_res[res][ring],1-self.topology_data.dict_of_plotted_res[res][ring]], radius=0.9+width*(ring+1), startangle=90, colors=[colors[ring],"white"], counterclock=False) rings=rings+ring plt.setp(rings, width=width) if len(self.topology_data.universe.protein.segments)<=1: plt.text(-0.0,-0.62,res[0]+"\n"+res[1],ha=,size=32, fontweight=) else: plt.text(-0.0,-0.72,res[0]+"\n"+res[1]+"\n"+res[2],ha=,size=25, fontweight=) pylab.savefig(res[1]+res[2]+".svg", dpi=300, transparent=True)
Plotting clock diagrams - one or more rings around the residue name and id (and chain id). The rings show the fraction of simulation time this residue has spent in the vicinity of the ligand, characterised by distance.
16,790
def get_attribute_values(self, att_name): lc = [] if not CPEComponent.is_valid_attribute(att_name): errmsg = "Invalid attribute name: {0}".format(att_name) raise ValueError(errmsg) for pk in CPE.CPE_PART_KEYS: elements = self.get(pk) for elem in elements: comp = elem.get(att_name) if (isinstance(comp, CPEComponentEmpty) or isinstance(comp, CPEComponentUndefined)): value = CPEComponent1_1.VALUE_EMPTY else: value = comp.get_value() lc.append(value) return lc
Returns the values of attribute "att_name" of CPE Name. By default there is only one element in each part. :param string att_name: Attribute name to get :returns: List of attribute values :rtype: list :exception: ValueError - invalid attribute name
16,791
def list_vms_sub(access_token, subscription_id):
    # NOTE: the literal URL fragments were lost in extraction; the path below follows
    # the standard ARM route for listing VMs in a subscription.
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.Compute/virtualMachines',
                        '?api-version=', COMP_API])
    return do_get_next(endpoint, access_token)
List VMs in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of a list of VM model views.
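A usage sketch; the token and subscription id are placeholders, and the iteration assumes the helper returns the parsed ARM JSON body with a 'value' list:

# both values below are hypothetical
access_token = '<valid-azure-bearer-token>'
subscription_id = '00000000-0000-0000-0000-000000000000'

vms = list_vms_sub(access_token, subscription_id)
for vm in vms.get('value', []):
    print(vm['name'], vm['location'])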
16,792
def send_async(self, transaction, headers=None):
    # the 'mode' value follows the docstring; 'POST' is the standard submit method
    return self.transport.forward_request(
        method='POST',
        path=self.path,
        json=transaction,
        params={'mode': 'async'},
        headers=headers)
Submit a transaction to the Federation with the mode `async`. Args: transaction (dict): the transaction to be sent to the Federation node(s). headers (dict): Optional headers to pass to the request. Returns: dict: The transaction sent to the Federation node(s).
16,793
def collection_keys(coll, sep='.'):
    # NOTE: the default separator literal was lost in extraction; '.' is an assumption
    def _keys(x, pre=''):
        for k in x:
            yield (pre + k)
            if isinstance(x[k], dict):
                for nested in _keys(x[k], pre + k + sep):
                    yield nested

    return list(_keys(coll.find_one()))
Get a list of all (including nested) keys in a collection. Examines the first document in the collection. :param sep: Separator for nested keys :return: List of str
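A usage sketch with pymongo; the database and collection names are hypothetical:

from pymongo import MongoClient

client = MongoClient()
coll = client['analytics']['events']

# e.g. ['_id', 'user', 'user.name', 'user.email', 'timestamp']
print(collection_keys(coll, sep='.'))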
16,794
def extract(args): p = OptionParser(extract.__doc__) p.add_option("--format", default=False, action="store_true", help="enable flag to reformat header into a symbol separated list of constituent reads "+ \ "[default: %default]") p.add_option("--singlets", default=False, action="store_true", help="ask the program to look in the singlets file (should be in the same folder) for " +\ "unused reads and put them in the resultant fasta file [default: %default]") p.set_sep(sep="|", help="Separator used to list the reads in the FASTA header") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) acefile, = args ace = Ace.read(must_open(acefile)) logging.debug(.format(acefile)) fastafile = acefile.rsplit(".", 1)[0] + ".fasta" fw = open(fastafile, "w") for c in ace.contigs: id = c.name if opts.format: id = opts.sep.join([read.name for read in c.af]) seqrec = SeqRecord(Seq(c.sequence), id=id, description="") SeqIO.write([seqrec], fw, "fasta") if opts.singlets: singletsfile = acefile.rsplit(".", 1)[0] + ".singlets" if os.path.getsize(singletsfile) > 0: fp = SeqIO.parse(must_open(singletsfile), "fasta") for rec in fp: SeqIO.write(rec, fw, "fasta") fw.close() logging.debug(.format(fastafile))
%prog extract [--options] ace_file Extract contigs from ace file and if necessary reformat header with a pipe(|) separated list of constituent reads.
16,795
def list_storage_accounts_rg(access_token, subscription_id, rgname):
    # NOTE: the literal URL fragments were lost in extraction; the path below follows
    # the standard ARM route for listing storage accounts in a resource group.
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/resourceGroups/', rgname,
                        '/providers/Microsoft.Storage/storageAccounts',
                        '?api-version=', STORAGE_API])
    return do_get(endpoint, access_token)
List the storage accounts in the specified resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. JSON body list of storage accounts.
16,796
def display_monthly_returns(self):
    # header reconstructed from the docstring: one column per month plus Year and YTD
    data = [['Year', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
             'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'YTD']]
    for k in self.return_table.index:
        r = self.return_table.loc[k].values
        data.append([k] + [fmtpn(x) for x in r])
    print(tabulate(data, headers='firstrow'))
Display a table containing monthly returns and ytd returns for every year in range.
16,797
def _get_rest_doc(self, request, start_response):
    api = request.body_json['api']
    version = request.body_json['version']

    generator = discovery_generator.DiscoveryGenerator(request=request)
    services = [s for s in self._backend.api_services if
                s.api_info.name == api and s.api_info.api_version == version]
    doc = generator.pretty_print_config_to_json(services)
    if not doc:
        # NOTE: the original message text was lost in extraction; the wording below
        # reconstructs its intent.
        error_msg = ('No discovery doc could be generated for version %s '
                     'of api %s') % (version, api)
        _logger.error('%s', error_msg)
        return util.send_wsgi_error_response(error_msg, start_response)
    return self._send_success_response(doc, start_response)
Sends back HTTP response with API directory. This calls start_response and returns the response body. It will return the discovery doc for the requested api/version. Args: request: An ApiRequest, the transformed request sent to the Discovery API. start_response: A function with semantics defined in PEP-333. Returns: A string, the response body.
16,798
def _run_hooks(self, name, module): hooks = self.post_load_hooks.pop(name, []) for hook in hooks: hook(module)
Run all hooks for a module.
16,799
def RemoveObject(self, identifier):
    if identifier not in self._values:
        # NOTE: the original message text was lost in extraction; wording reconstructed
        raise KeyError('Missing cached object for identifier: {0:s}'.format(
            identifier))

    del self._values[identifier]
Removes a cached object based on the identifier. This method ignores the cache value reference count. Args: identifier (str): VFS object identifier. Raises: KeyError: if the VFS object is not found in the cache.