[Dataset header: ~389k rows; columns "code" (string, 26–79.6k chars) and "docstring" (string, 1–46.9k chars).]
386,700
def add_arg_param(self, param_name, layer_index, blob_index):
    # The prefix literal was lost in extraction; 'arg:%s' matches the MXNet
    # .params naming convention for argument parameters (treat as inferred).
    self.add_param('arg:%s' % param_name, layer_index, blob_index)
Add an arg param to .params file. Example: weights of a fully connected layer.
386,701
def plot_prof_1(self, species, keystring, xlim1, xlim2, ylim1, ylim2,
                symbol=None, show=False):
    # Most string literals below were lost in extraction; the keys and labels
    # are inferred from the NuGridPy se/mesa_profile conventions.
    plotType = self._classTest()
    if plotType == 'se':
        tot_mass = self.se.get('mtot')
        age = self.se.get(keystring, 'age')
        mass = self.se.get(keystring, 'mass')
        Xspecies = self.se.get(keystring, 'iso_massf', species)
        mod = keystring
    elif plotType == 'mesa_profile':
        tot_mass = self.header_attr['star_mass']
        age = self.header_attr['star_age']
        mass = self.get('mass')
        mod = self.header_attr['model_number']
        Xspecies = self.get(species)
    else:
        print('plot_prof_1 is not supported by ' + str(self.__class__))
        return
    if symbol is None:
        symbol = '-'
    x, y = self._logarithm(Xspecies, mass, True, False, 10)
    pl.plot(y, x, symbol, label=str(species))
    pl.xlim(xlim1, xlim2)
    pl.ylim(ylim1, ylim2)
    pl.legend()
    pl.xlabel('mass coordinate', fontsize=20)
    pl.ylabel('mass fraction', fontsize=20)
    pl.title('Mass=' + str(tot_mass) + ', cycle=' + str(mod))
    if show:
        pl.show()
Plot one species for cycle between xlim1 and xlim2. Only works with instances of se and mesa _profile. Parameters ---------- species : list Which species to plot. keystring : string or integer Label that appears in the plot or, in the case of se, a cycle. xlim1, xlim2 : integer or float Mass coordinate range. ylim1, ylim2 : integer or float Mass fraction coordinate range. symbol : string, optional Which symbol you want to use. If None symbol is set to '-'. The default is None. show : boolean, optional Show the plotted graph. The default is False.
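A minimal usage sketch for the method above; the instance `pt`, the species name and the cycle number are purely illustrative:

```python
# Hypothetical call: plot He-4 for cycle 500 over mass coordinate 0.0-1.5.
pt.plot_prof_1('He-4', 500, 0.0, 1.5, 1e-6, 1.0, symbol='--', show=True)
```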
386,702
def start(info):
    # The command literals were lost in extraction; 'runserver_plus' and
    # 'runserver' are inferred from the docstring below.
    cmd = options.paved.django.runserver
    if cmd == 'runserver_plus':
        try:
            import django_extensions
        except ImportError:
            info("Could not import django_extensions. Using default runserver.")
            cmd = 'runserver'
    port = options.paved.django.runserver_port
    if port:
        cmd = '%s %s' % (cmd, port)
    call_manage(cmd)
Run the dev server. Uses `django_extensions <http://pypi.python.org/pypi/django-extensions/0.5>`, if available, to provide `runserver_plus`. Set the command to use with `options.paved.django.runserver` Set the port to use with `options.paved.django.runserver_port`
386,703
def writefile(filename, content):
    with open(path_expand(filename), 'w') as outfile:
        outfile.write(content)
writes the content into the file :param filename: the filename :param content: the content :return:
386,704
def crypto_validator(func):
    def func_in(*args, **kwargs):
        if not conf.crypto_valid:
            raise ImportError("Cannot execute crypto-related method! "
                              "Please install python-cryptography v1.7 or later.")
        return func(*args, **kwargs)
    return func_in
This is a decorator to be used for any method relying on the cryptography library. Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'.
386,705
def get_matrix(self):
    matrix = Matrix()
    cairo.cairo_get_matrix(self._pointer, matrix._pointer)
    self._check_status()
    return matrix
Return a copy of the current transformation matrix (CTM).
386,706
def set_config(self, **config):
    # The config-key string literals were lost in extraction; they are
    # inferred from the attribute names assigned below.
    reinit = False
    if 'stdopt' in config:
        stdopt = config.pop('stdopt')
        reinit = (stdopt != self.stdopt)
        self.stdopt = stdopt
    if 'attachopt' in config:
        attachopt = config.pop('attachopt')
        reinit = reinit or (attachopt != self.attachopt)
        self.attachopt = attachopt
    if 'attachvalue' in config:
        attachvalue = config.pop('attachvalue')
        reinit = reinit or (attachvalue != self.attachvalue)
        self.attachvalue = attachvalue
    if 'auto2dashes' in config:
        self.auto2dashes = config.pop('auto2dashes')
    if 'name' in config:
        name = config.pop('name')
        reinit = reinit or (name != self.name)
        self.name = name
    if 'help' in config:
        self.help = config.pop('help')
        self._set_or_remove_extra_handler(
            self.help, ('-h', '--help'), self.help_handler)
    if 'version' in config:
        self.version = config.pop('version')
        self._set_or_remove_extra_handler(
            self.version is not None, ('-v', '--version'), self.version_handler)
    if 'case_sensitive' in config:
        case_sensitive = config.pop('case_sensitive')
        reinit = reinit or (case_sensitive != self.case_sensitive)
        self.case_sensitive = case_sensitive
    if 'optionsfirst' in config:
        self.options_first = config.pop('optionsfirst')
    if 'appearedonly' in config:
        self.appeared_only = config.pop('appearedonly')
    if 'namedoptions' in config:
        namedoptions = config.pop('namedoptions')
        reinit = reinit or (namedoptions != self.namedoptions)
        self.namedoptions = namedoptions
    if 'extra' in config:
        self.extra.update(self._formal_extra(config.pop('extra')))
    if config:  # any unknown keys remain
        raise ValueError('`%s` %s not accepted key argument%s' % (
            '`, `'.join(config),
            'is' if len(config) == 1 else 'are',
            '' if len(config) == 1 else 's'))
    if self.doc is not None and reinit:
        logger.warning(
            'Config changed; the `doc` object needs to be re-initialized')
    self._init()
Shadow all the current config.
386,707
def active(self):
    # The key into self._paths was lost in extraction; a pid-file path is a
    # plausible guess and should be treated as an assumption.
    if not os.path.isfile(self._paths['pid']):
        return False
    return self._loaded
Returns whether the task is active.
386,708
def schema(self):
    # The string literals ('context', 'self', error message, option names)
    # were lost in extraction; restored following the marshmallow 2.x source.
    if not self.__schema:
        context = getattr(self.parent, 'context', {})
        if isinstance(self.nested, SchemaABC):
            self.__schema = self.nested
            self.__schema.context.update(context)
        else:
            if isinstance(self.nested, type) and issubclass(self.nested, SchemaABC):
                schema_class = self.nested
            elif not isinstance(self.nested, basestring):
                raise ValueError('Nested fields must be passed a Schema, not {0}.'
                                 .format(self.nested.__class__))
            elif self.nested == 'self':
                schema_class = self.parent.__class__
            else:
                schema_class = class_registry.get_class(self.nested)
            self.__schema = schema_class(
                many=self.many,
                only=self.only,
                exclude=self.exclude,
                context=context,
                load_only=self._nested_normalized_option('load_only'),
                dump_only=self._nested_normalized_option('dump_only'),
            )
    return self.__schema
The nested Schema object. .. versionchanged:: 1.0.0 Renamed from `serializer` to `schema`
386,709
def QA_fetch_get_option_50etf_contract_time_to_market():
    # The single-quoted (mostly Chinese) literals below were lost in
    # extraction; they are reconstructed from the 510050 (50ETF) option code
    # naming convention and should be treated as inferred.
    result = QA_fetch_get_option_list()
    rows = []
    result['meaningful_name'] = None
    for idx in result.index:
        strCategory = result.loc[idx, "category"]
        strMarket = result.loc[idx, "market"]
        strCode = result.loc[idx, "code"]
        strName = result.loc[idx, 'name']
        strDesc = result.loc[idx, 'desc']
        if strName.startswith("510050"):
            if strName.startswith("510050C"):
                putcall = '认购期权'
            elif strName.startswith("510050P"):
                putcall = '认沽期权'
            else:
                putcall = "Unknown code name : " + strName
            expireMonth = strName[7:8]
            if expireMonth == 'A':
                expireMonth = "10月"
            elif expireMonth == 'B':
                expireMonth = "11月"
            elif expireMonth == 'C':
                expireMonth = "12月"
            else:
                expireMonth = expireMonth + '月'
            # Adjustment-code letters map to the number of adjustments.
            adjust_codes = {'M': "未调整", 'A': " 第1次调整", 'B': " 第2次调整",
                            'C': " 第3次调整", 'D': " 第4次调整", 'E': " 第5次调整",
                            'F': " 第6次调整", 'G': " 第7次调整", 'H': " 第8次调整",
                            'I': " 第9次调整", 'J': " 第10次调整"}
            adjust = adjust_codes.get(
                strName[8:9],
                " 第10次以上的调整,调整代码 %s" % strName[8:9])
            executePrice = strName[9:]
            result.loc[idx, 'meaningful_name'] = '%s,到期月份:%s%s,行权价:%s' % (
                putcall, expireMonth, adjust, executePrice)
            row = result.loc[idx]
            rows.append(row)
    return rows
#🛠todo: fetch the listing (time-to-market) date of the option contracts? Not available yet. :return: list of Series
386,710
def _resolve_file(file_name):
    if not file_name:
        return None
    path = os.path.realpath(file_name)
    if os.path.isfile(path):
        return path
    return None
Checks if the file exists. If the file exists, the method returns its absolute path. Else, it returns None :param file_name: The name of the file to check :return: An absolute path, or None
386,711
def run_mutation_aggregator(job, mutation_results, univ_options):
    # The dict keys and log template were lost in extraction; 'mutect',
    # 'merged' and 'patient' are inferred from the ProTECT pipeline layout.
    out = {}
    for chrom in mutation_results['mutect'].keys():
        out[chrom] = job.addChildJobFn(merge_perchrom_mutations, chrom,
                                       mutation_results, univ_options).rv()
    merged_snvs = job.addFollowOnJobFn(merge_perchrom_vcfs, out, 'merged',
                                       univ_options)
    job.fileStore.logToMaster('Aggregated mutations for %s successfully'
                              % univ_options['patient'])
    return merged_snvs.rv()
Aggregate all the called mutations. :param dict mutation_results: Dict of dicts of the various mutation callers in a per chromosome format :param dict univ_options: Dict of universal options used by almost all tools :returns: fsID for the merged mutations file :rtype: toil.fileStore.FileID
386,712
def _log(self, name, element):
    from bs4 import BeautifulSoup, Tag
    # The log format strings were lost in extraction; plausible templates
    # restored from the argument lists.
    if isinstance(element, Response):
        LOGGER.debug('%s: %s (%s)', name, element.url, element.status_code)
    elif isinstance(element, (BeautifulSoup, Tag)):
        LOGGER.debug('%s: %s', name, element)
Log Response and Tag elements. Do nothing if element is none of them.
386,713
def compute_difficulty(
        bomb_delay: int,
        parent_header: BlockHeader,
        timestamp: int) -> int:
    parent_timestamp = parent_header.timestamp
    validate_gt(timestamp, parent_timestamp, title="Header.timestamp")
    parent_difficulty = parent_header.difficulty
    offset = parent_difficulty // DIFFICULTY_ADJUSTMENT_DENOMINATOR

    has_uncles = parent_header.uncles_hash != EMPTY_UNCLE_HASH
    adj_factor = max(
        (
            (2 if has_uncles else 1) -
            ((timestamp - parent_timestamp) // BYZANTIUM_DIFFICULTY_ADJUSTMENT_CUTOFF)
        ),
        -99,
    )
    difficulty = max(
        parent_difficulty + offset * adj_factor,
        min(parent_header.difficulty, DIFFICULTY_MINIMUM)
    )
    num_bomb_periods = (
        max(
            0,
            parent_header.block_number + 1 - bomb_delay,
        ) // BOMB_EXPONENTIAL_PERIOD
    ) - BOMB_EXPONENTIAL_FREE_PERIODS

    if num_bomb_periods >= 0:
        return max(difficulty + 2**num_bomb_periods, DIFFICULTY_MINIMUM)
    else:
        return difficulty
https://github.com/ethereum/EIPs/issues/100
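For reference, the Byzantium rule the code implements can be restated compactly. Writing $D_p$ for the parent difficulty, $t - t_p$ for the timestamp delta, and $y = 2$ if the parent has uncles (else $1$), and assuming the usual EIP-100 constants (adjustment denominator 2048, cutoff 9), the pre-bomb difficulty is:

```latex
D = \max\!\Big(
      D_p + \Big\lfloor \tfrac{D_p}{2048} \Big\rfloor \cdot
            \max\!\Big(y - \Big\lfloor \tfrac{t - t_p}{9} \Big\rfloor,\; -99\Big),
      \; D_{\min}\Big)
```

The bomb term then adds $2^k$ where $k$ counts 100000-block periods past the (delayed) block number, minus the free periods, exactly as in the `num_bomb_periods` branch above.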
386,714
def _tcpdump_callback(self, line, kill_switch):
    line = line.lower()
    if ("listening" in line) or ("reading" in line):
        self.started = True
    if ("no suitable device" in line):
        self.error = True
        self.kill_switch()
    if "by kernel" in line:
        self.stopped = True
Callback function to handle tcpdump
386,715
def write_stats_as_csv(gtfs, path_to_csv, re_write=False):
    # The file-mode literals were lost in extraction; 'r'/'w'/'a' are
    # inferred from the read-then-append flow.
    stats_dict = get_stats(gtfs)
    if re_write:
        os.remove(path_to_csv)
    is_new = True
    mode = 'r' if os.path.exists(path_to_csv) else 'w'
    with open(path_to_csv, mode) as csvfile:
        for line in csvfile:
            if line:
                is_new = False
            else:
                is_new = True
    with open(path_to_csv, 'a') as csvfile:
        if (sys.version_info > (3, 0)):
            delimiter = u","
        else:
            delimiter = b","
        statswriter = csv.writer(csvfile, delimiter=delimiter)
        if is_new:
            statswriter.writerow([key for key in sorted(stats_dict.keys())])
        row_to_write = []
        for key in sorted(stats_dict.keys()):
            row_to_write.append(stats_dict[key])
        statswriter.writerow(row_to_write)
Writes data from get_stats to csv file Parameters ---------- gtfs: GTFS path_to_csv: str filepath to the csv file to be generated re_write: instead of appending, create a new file.
386,716
def unattach_rconfiguration(context, id, rconfiguration_id):
    result = remoteci.delete_rconfiguration(
        context, id=id, rconfiguration_id=rconfiguration_id)
    if result.status_code == 204:
        # The key/message literals were lost in extraction; inferred.
        utils.print_json({'id': id, 'message': 'Rconfiguration unattached.'})
    else:
        utils.format_output(result, context.format)
unattach_rconfiguration(context, id, rconfiguration_id): Unattach a rconfiguration from a remoteci. >>> dcictl remoteci-unattach-rconfiguration id [OPTIONS] :param string id: ID of the remoteci to unattach the rconfiguration from [required] :param string rconfiguration_id: ID of the rconfiguration to unattach [required]
386,717
def get_site_type_dummy_variables(self, sites):
    is_rock = np.array(sites.vs30 > self.NEHRP_BC_BOUNDARY)
    return is_rock
Binary rock/soil classification dummy variable based on sites.vs30. "``S`` is 1 for a rock site and 0 otherwise" (p. 1201).
386,718
def cast_to_seq_record(obj, alphabet=IUPAC.extended_protein,
                       id="<unknown id>", name="<unknown name>",
                       description="<unknown description>", dbxrefs=None,
                       features=None, annotations=None,
                       letter_annotations=None):
    if isinstance(obj, SeqRecord):
        return obj
    if isinstance(obj, Seq):
        return SeqRecord(obj, id, name, description, dbxrefs, features,
                         annotations, letter_annotations)
    if isinstance(obj, str):
        obj = obj.upper()
        return SeqRecord(Seq(obj, alphabet), id, name, description, dbxrefs,
                         features, annotations, letter_annotations)
    else:
        # The error message literal was lost in extraction; inferred.
        raise ValueError('Must provide a string, Seq, or SeqRecord object.')
Return a SeqRecord representation of a string or Seq object. Args: obj (str, Seq, SeqRecord): Sequence string or Biopython Seq object alphabet: See Biopython SeqRecord docs id: See Biopython SeqRecord docs name: See Biopython SeqRecord docs description: See Biopython SeqRecord docs dbxrefs: See Biopython SeqRecord docs features: See Biopython SeqRecord docs annotations: See Biopython SeqRecord docs letter_annotations: See Biopython SeqRecord docs Returns: SeqRecord: SeqRecord representation of the sequence
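A short usage sketch, assuming Biopython is installed; the input string and id are illustrative:

```python
from Bio.SeqRecord import SeqRecord

rec = cast_to_seq_record("mskv", id="prot1")   # string input is uppercased
assert isinstance(rec, SeqRecord) and str(rec.seq) == "MSKV"
```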
386,719
def _write_box_information(xml_file, structure, ref_distance):
    # The XML template literals were lost in extraction; the attribute names
    # below follow the HOOMD XML box format and should be treated as inferred.
    if np.allclose(structure.box[3:6], np.array([90, 90, 90])):
        box_str = '<box units="sigma" Lx="{}" Ly="{}" Lz="{}"/>\n'
        xml_file.write(box_str.format(*structure.box[:3] / ref_distance))
    else:
        a, b, c = structure.box[0:3] / ref_distance
        alpha, beta, gamma = np.radians(structure.box[3:6])
        lx = a
        xy = b * np.cos(gamma)
        xz = c * np.cos(beta)
        ly = np.sqrt(b**2 - xy**2)
        yz = (b*c*np.cos(alpha) - xy*xz) / ly
        lz = np.sqrt(c**2 - xz**2 - yz**2)
        box_str = ('<box units="sigma" Lx="{}" Ly="{}" Lz="{}" '
                   'xy="{}" xz="{}" yz="{}"/>\n')
        xml_file.write(box_str.format(lx, ly, lz, xy, xz, yz))
Write box information. Parameters ---------- xml_file : file object The file object of the hoomdxml file being written structure : parmed.Structure Parmed structure object ref_distance : float, default=1.0 Reference distance for conversion to reduced units
386,720
def sizeof(self, context=None) -> int:
    if context is None:
        context = Context()
    if not isinstance(context, Context):
        context = Context(context)
    try:
        return self._sizeof(context)
    except Error:
        raise
    except Exception as exc:
        raise SizeofError(str(exc))
Return the size of the construct in bytes. :param context: Optional context dictionary.
386,721
def get_feature_flag_by_name(self, name, check_feature_exists=None):
    # Serializer keys, HTTP verb, API version and the route GUID were lost in
    # extraction; the values follow the Azure DevOps 5.0 client pattern and
    # the location_id is left as a placeholder.
    route_values = {}
    if name is not None:
        route_values['name'] = self._serialize.url('name', name, 'str')
    query_parameters = {}
    if check_feature_exists is not None:
        query_parameters['checkFeatureExists'] = self._serialize.query(
            'check_feature_exists', check_feature_exists, 'bool')
    response = self._send(http_method='GET',
                          location_id='...',  # GUID lost in extraction
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('FeatureFlag', response)
GetFeatureFlagByName. [Preview API] Retrieve information on a single feature flag and its current states :param str name: The name of the feature to retrieve :param bool check_feature_exists: Check if feature exists :rtype: :class:`<FeatureFlag> <azure.devops.v5_0.feature_availability.models.FeatureFlag>`
386,722
def _get_nws_feed(self):
    # The URL template was lost in extraction; this is the public NWS CAP
    # feed layout and should be treated as inferred.
    url = 'https://alerts.weather.gov/cap/%s.php?x=0' % (str(self._state).lower())
    xml = requests.get(url).content
    return xml
get nws alert feed, and cache it
386,723
def push(self, metric_name=None, metric_value=None, volume=None):
    # The '.' separator literals were lost in extraction.
    graphite_path = self.path_prefix
    graphite_path += '.' + self.device + '.'
    graphite_path += volume + '.' + metric_name
    metric = Metric(graphite_path, metric_value, precision=4,
                    host=self.device)
    self.publish_metric(metric)
Ship that shit off to graphite broski
386,724
def _set_sip_ipv4_address(self, v, load=False):
    # Most single-quoted literals (regex pattern, choice/extension names,
    # namespace, error strings) were lost in extraction; the placeholders
    # below follow the pyangbind generated-code layout and are inferred.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=unicode,
                restriction_dict={'pattern': u'...'}),  # IPv4 pattern lost
            is_leaf=True,
            yang_name="sip-ipv4-address",
            rest_name="sip-ipv4-address",
            parent=self,
            choice=(u'ch-update-source', u'ca-ipv4'),
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'info': None}},
            namespace='urn:brocade.com:mgmt:brocade-bgp',
            defining_module='brocade-bgp',
            yang_type='sip-ipv4-address',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'sip_ipv4_address must be of a type compatible '
                            'with sip-ipv4-address',
            'defined-type': "brocade-bgp:sip-ipv4-address",
            'generated-type': '...',  # literal lost in extraction
        })
    self.__sip_ipv4_address = t
    if hasattr(self, '_set'):
        self._set()
Setter method for sip_ipv4_address, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ips/neighbor_addr/update_source/sip_ipv4_address (sip-ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_sip_ipv4_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_sip_ipv4_address() directly.
386,725
def _from_dict(cls, _dict):
    # The key and message literals were lost in extraction; restored
    # following the Watson SDK _from_dict pattern.
    args = {}
    if 'tones' in _dict:
        args['tones'] = [ToneScore._from_dict(x) for x in (_dict.get('tones'))]
    else:
        raise ValueError(
            'Required property \'tones\' not present in ToneCategory JSON')
    if 'category_id' in _dict:
        args['category_id'] = _dict.get('category_id')
    else:
        raise ValueError(
            'Required property \'category_id\' not present in ToneCategory JSON')
    if 'category_name' in _dict:
        args['category_name'] = _dict.get('category_name')
    else:
        raise ValueError(
            'Required property \'category_name\' not present in ToneCategory JSON')
    return cls(**args)
Initialize a ToneCategory object from a json dictionary.
386,726
def create_bookmark_action(parent, url, title, icon=None, shortcut=None):
    @Slot()
    def open_url():
        return programs.start_file(url)

    return create_action(parent, title, shortcut=shortcut, icon=icon,
                         triggered=open_url)
Create bookmark action
386,727
def min_volatility(self):
    if not self.w:
        self.solve()
    var = []
    for w in self.w:
        a = np.dot(np.dot(w.T, self.cov_matrix), w)
        var.append(a)
    self.weights = self.w[var.index(min(var))].reshape((self.n_assets,))
    return dict(zip(self.tickers, self.weights))
Get the minimum variance solution
386,728
def readXML(self):
    data = self.readLongString()
    root = xml.fromstring(data)
    self.context.addObject(root)
    return root
Read XML.
386,729
def load_modules(self, filepaths):
    self._update_loaded_modules()
    filepaths = util.return_set(filepaths)
    modules = []
    for filepath in filepaths:
        filepath = self._clean_filepath(filepath)
        if self._processed_filepath(filepath):
            continue
        module_name = util.get_module_name(filepath)
        plugin_module_name = util.create_unique_module_name(module_name)
        try:
            module = load_source(plugin_module_name, filepath)
        except Exception:
            exc_info = sys.exc_info()
            self._log.error(msg=self._error_string.format(filepath),
                            exc_info=exc_info)
            continue
        self.loaded_modules.add(module.__name__)
        modules.append(module)
        self.processed_filepaths[module.__name__] = filepath
    return modules
Loads the modules from their `filepaths`. A filepath may be a directory filepath if there is an `__init__.py` file in the directory. If a filepath errors, the exception will be caught and logged in the logger. Returns a list of modules.
386,730
def pretty_objname(self, obj=None, maxlen=50, color="boldcyan"):
    parent_name = lambda_sub("", get_parent_name(obj) or "")
    objname = get_obj_name(obj)
    if color:
        objname += colorize("<{}>".format(parent_name), color, close=False)
    else:
        objname += "<{}>".format(parent_name)
    objname = objname if len(objname) < maxlen else \
        objname[:(maxlen - 1)] + "…>"
    if color:
        objname += colors.RESET
    return objname
Pretty prints object name @obj: the object whose name you want to pretty print @maxlen: #int maximum length of an object name to print @color: your choice of :mod:colors or |None| -> #str pretty object name .. from vital.debug import Look print(Look.pretty_objname(dict)) # -> 'dict\x1b[1;36m<builtins>\x1b[1;m' ..
386,731
def zonalstats(features, raster, all_touched, band, categorical, indent, info,
               nodata, prefix, stats, sequence, use_rs):
    # A few single-quoted literals ('all', the RS byte, GeoJSON keys) were
    # lost in extraction and are restored from the rio-zonalstats CLI layout.
    if info:
        logging.basicConfig(level=logging.INFO)
    if stats is not None:
        stats = stats.split(" ")
        if 'all' in [x.lower() for x in stats]:
            stats = "ALL"
    zonal_results = gen_zonal_stats(
        features,
        raster,
        all_touched=all_touched,
        band=band,
        categorical=categorical,
        nodata=nodata,
        stats=stats,
        prefix=prefix,
        geojson_out=True)
    if sequence:
        for feature in zonal_results:
            if use_rs:
                click.echo(b'\x1e', nl=False)  # ASCII record separator
            click.echo(json.dumps(feature))
    else:
        click.echo(json.dumps(
            {'type': 'FeatureCollection',
             'features': list(zonal_results)}))
zonalstats generates summary statistics of geospatial raster datasets based on vector features. The input arguments to zonalstats should be valid GeoJSON Features. (see cligj) The output GeoJSON will be mostly unchanged but have additional properties per feature describing the summary statistics (min, max, mean, etc.) of the underlying raster dataset. The raster is specified by the required -r/--raster argument. Example, calculate rainfall stats for each state and output to file: \b rio zonalstats states.geojson -r rainfall.tif > mean_rainfall_by_state.geojson
386,732
def ipop_range(self, start=0, stop=-1, callback=None, withscores=True):
    backend = self.backend
    res = backend.structure(self).ipop_range(start, stop,
                                             withscores=withscores)
    if not callback:
        callback = self.load_data if withscores else self.load_values
    return backend.execute(res, callback)
pop a range from the :class:`OrderedMixin`
386,733
def fromFile(cls, filepath):
    # Several attribute/tag name literals were lost in extraction; they are
    # inferred from the fields read below and should be treated as guesses.
    xdata = ElementTree.parse(nstr(filepath))
    xroot = xdata.getroot()
    name = xroot.get('name')
    ver = float(xroot.get('version', '1.0'))
    if not name:
        name = os.path.basename(filepath).split('.')[0]
        if name == '__init__':
            name = os.path.normpath(filepath).split(os.path.sep)[-2]
        name = projex.text.pretty(name)
    icon = xroot.get('icon', '')
    ximport = xroot.find('import')
    if ximport is not None:
        importpath = ximport.get('path', '')
    else:
        importpath = ''
    params = {'description': '', 'author': '', 'email': '', 'url': ''}
    for param, default in params.items():
        xdata = xroot.find(param)
        if xdata is not None:
            params[param] = xdata.text
    proxy = PluginProxy(cls, name, ver)
    proxy.setImportPath(importpath)
    proxy.setDescription(params['description'])
    proxy.setAuthor(params['author'])
    proxy.setEmail(params['email'])
    proxy.setUrl(params['url'])
    proxy.setFilepath(filepath)
    return proxy
Creates a proxy instance from the inputted registry file. :param filepath | <str> :return <PluginProxy> || None
386,734
def get_features(self, mapobject_type_name):
    # The log template, URL route and JSON key were lost in extraction;
    # restored following the TissueMAPS client layout (treat as inferred).
    logger.info(
        'get features of experiment "%s", object type "%s"',
        self.experiment_name, mapobject_type_name
    )
    mapobject_type_id = self._get_mapobject_type_id(mapobject_type_name)
    url = self._build_api_url(
        '/experiments/{experiment_id}/mapobject_types/'
        '{mapobject_type_id}/features'.format(
            experiment_id=self._experiment_id,
            mapobject_type_id=mapobject_type_id
        )
    )
    res = self._session.get(url)
    res.raise_for_status()
    return res.json()['data']
Gets features for a given object type. Parameters ---------- mapobject_type_name: str type of the segmented objects Returns ------- List[Dict[str, str]] information about each feature See also -------- :func:`tmserver.api.feature.get_features` :class:`tmlib.models.feature.Feature`
386,735
def banlist(self, channel):
    # The MODE command and numeric reply literals were lost in extraction;
    # '367'/'368' are the standard RPL_BANLIST/RPL_ENDOFBANLIST codes.
    with self.lock:
        self.is_in_channel(channel)
        self.send('MODE %s +b' % channel)
        bans = []
        while self.readable():
            msg = self._recv(expected_replies=('367', '368'))
            if msg[0] == '367':
                banmask, who, timestamp = msg[2].split()[1:]
                # the helper name after self._from_ was possibly truncated
                bans.append((self._from_(banmask), who,
                             self._m_time.localtime(int(timestamp))))
            elif msg[0] == '368':
                break
        return bans
Get the channel banlist. Required arguments: * channel - Channel of which to get the banlist for.
386,736
def describe_event_source_mapping(UUID=None, EventSourceArn=None,
                                  FunctionName=None,
                                  region=None, key=None, keyid=None,
                                  profile=None):
    # The dict-key literals were lost in extraction; restored following the
    # salt boto_lambda module conventions.
    ids = _get_ids(UUID, EventSourceArn=EventSourceArn,
                   FunctionName=FunctionName)
    if not ids:
        return {'event_source_mapping': None}
    UUID = ids[0]
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        desc = conn.get_event_source_mapping(UUID=UUID)
        if desc:
            keys = ('UUID', 'BatchSize', 'EventSourceArn', 'FunctionArn',
                    'LastModified', 'LastProcessingResult', 'State',
                    'StateTransitionReason')
            return {'event_source_mapping':
                    dict([(k, desc.get(k)) for k in keys])}
        else:
            return {'event_source_mapping': None}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
Given an event source mapping ID or an event source ARN and FunctionName, obtain the current settings of that mapping. Returns a dictionary of interesting properties. CLI Example: .. code-block:: bash salt myminion boto_lambda.describe_event_source_mapping uuid
386,737
def get_grade_entry_form_for_update(self, grade_entry_id):
    # The collection names and identifier-namespace literal were lost in
    # extraction; restored from the dlkit JSON-backend conventions.
    collection = JSONClientValidated('grading',
                                     collection='GradeEntry',
                                     runtime=self._runtime)
    if not isinstance(grade_entry_id, ABCId):
        raise errors.InvalidArgument()
    if (grade_entry_id.get_identifier_namespace() != 'grading.GradeEntry' or
            grade_entry_id.get_authority() != self._authority):
        raise errors.InvalidArgument()
    result = collection.find_one(
        {'_id': ObjectId(grade_entry_id.get_identifier())})
    obj_form = objects.GradeEntryForm(
        osid_object_map=result,
        effective_agent_id=str(self.get_effective_agent_id()),
        runtime=self._runtime,
        proxy=self._proxy)
    self._forms[obj_form.get_id().get_identifier()] = not UPDATED
    return obj_form
Gets the grade entry form for updating an existing entry. A new grade entry form should be requested for each update transaction. arg: grade_entry_id (osid.id.Id): the ``Id`` of the ``GradeEntry`` return: (osid.grading.GradeEntryForm) - the grade entry form raise: NotFound - ``grade_entry_id`` is not found raise: NullArgument - ``grade_entry_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
386,738
def strainer(self):
    analyse = list()
    for sample in self.runmetadata.samples:
        if sample.general.bestassemblyfile != 'NA':
            try:
                # The body of this try block was lost in extraction; it
                # presumably probed a per-analysis attribute on the sample.
                _ = sample[self.analysistype]
            except (KeyError, AttributeError):
                self.populator(sample)
                analyse.append(True)
            else:
                self.populator(sample)
                analyse.append(False)
    MLST(self)
Determine whether it is required to run the MLST analyses
386,739
def wait_for_task_property(service, task, prop, timeout_sec=120):
    return time_wait(
        lambda: task_property_present_predicate(service, task, prop),
        timeout_seconds=timeout_sec)
Waits for a task to have the specified property
386,740
def as_dict(self):
    d = {"@module": self.__class__.__module__,
         "@class": self.__class__.__name__}
    if isinstance(self.entry, IonEntry):
        d["entry_type"] = "Ion"
    else:
        d["entry_type"] = "Solid"
    d["entry"] = self.entry.as_dict()
    d["concentration"] = self.concentration
    d["entry_id"] = self.entry_id
    return d
Returns dict which contains Pourbaix Entry data. Note that the pH, voltage, H2O factors are always calculated when constructing a PourbaixEntry object.
386,741
def imagetransformer_ae_cifar():
    hparams = transformer_ae_small()
    hparams.filter_size = 512
    hparams.num_compress_steps = 3
    hparams.startup_steps = 10000
    hparams.is_2d = 0
    hparams.learning_rate_warmup_steps = 8000
    hparams.learning_rate = 0.2
    hparams.hidden_size = 512
    hparams.batch_size = 1
    hparams.max_length = 256
    hparams.dropout = 0.0
    hparams.clip_grad_norm = 0.
    hparams.optimizer_adam_epsilon = 1e-9
    hparams.learning_rate_decay_scheme = "noam"
    hparams.learning_rate = 0.1
    hparams.initializer_gain = 0.2
    hparams.num_hidden_layers = 6
    hparams.initializer = "uniform_unit_scaling"
    hparams.weight_decay = 0.0
    hparams.optimizer_adam_beta1 = 0.9
    hparams.optimizer_adam_beta2 = 0.98
    hparams.label_smoothing = 0.0
    hparams.norm_type = "layer"
    hparams.layer_prepostprocess_dropout = 0.0
    hparams.num_heads = 8
    hparams.task = "image"
    hparams.ffn_layer = "conv_hidden_relu"
    hparams.attention_dropout = 0.0
    hparams.relu_dropout = 0.
    hparams.pos = "timing"
    hparams.nbr_decoder_problems = 1
    hparams.num_output_layers = 3
    hparams.bottleneck_kind = "dvq"
    hparams.add_hparam("block_size", 1)
    hparams.add_hparam("gap_sizes", [2, 4, 8, 16, 32, 64, 2, 4, 8, 16, 32, 64])
    hparams.add_hparam("dilated_attention", False)
    hparams.add_hparam("img_len", 32)
    hparams.add_hparam("num_channels", 3)
    hparams.add_hparam("local_and_global_att", False)
    hparams.add_hparam("block_length", 256)
    hparams.add_hparam("block_width", 128)
    hparams.num_encoder_layers = 4
    hparams.num_decoder_layers = 12
    hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D)
    hparams.add_hparam("block_raster_scan", False)
    hparams.add_hparam("shared_rel", False)
    hparams.add_hparam("q_filter_width", 1)
    hparams.add_hparam("kv_filter_width", 1)
    hparams.add_hparam("unconditional", False)
    hparams.bottom["targets"] = modalities.image_channel_embeddings_bottom
    hparams.top["targets"] = modalities.image_channel_embeddings_top
    hparams.drop_inputs = True
    hparams.do_attend_compress = False
    hparams.do_attend_decompress = False
    return hparams
Hyperparameters for CIFAR-10 experiments.
386,742
def clear_trash(cookie, tokens):
    # The URL fragments, header key and form body were lost in extraction;
    # the values below follow the Baidu Pan web API layout and should be
    # treated as inferred.
    url = ''.join([
        const.PAN_API_URL,
        'recycle/clear?channel=chunlei&clienttype=0&web=1',
        '&t=', util.timestamp(),
        '&bdstoken=', tokens['bdstoken'],
    ])
    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
    }, data=''.encode())  # form body literal lost in extraction
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
Empty the recycle bin, deleting all the files in it.
386,743
def _get_all_styles(self):
    _styles = {}

    def _get_style(bold=False, bg_col=None, border=None):
        if (bold, bg_col, border) not in _styles:
            _styles[(bold, bg_col, border)] = CellStyle(bold=bold,
                                                        bg_color=bg_col,
                                                        border=border)
        return _styles[(bold, bg_col, border)]

    ws_styles = {}
    for table, (row, col) in self.__tables.values():
        # header cells
        for r in range(row, row + table.header_height):
            for c in range(col, col + table.width):
                if isinstance(table.header_style, dict):
                    col_name = table.dataframe.columns[c - col]
                    style = table.header_style.get(col_name,
                                                   _get_style(bold=True))
                else:
                    style = table.header_style or _get_style(bold=True)
                ws_styles[(r, c)] = style
        # index (row label) cells
        for c in range(col, col + table.row_labels_width):
            for r in range(row + table.header_height, row + table.height):
                if isinstance(table.index_style, dict):
                    row_name = table.dataframe.index[r - row]
                    style = table.index_style.get(row_name,
                                                  _get_style(bold=True))
                else:
                    style = table.index_style or _get_style(bold=True)
                ws_styles[(r, c)] = style
        # striped backgrounds / borders
        if table.style.stripe_colors or table.style.border:
            num_bg_cols = len(table.style.stripe_colors) if \
                table.style.stripe_colors else 1
            bg_cols = table.style.stripe_colors if \
                table.style.stripe_colors else None
            for i, row_offset in enumerate(range(table.header_height,
                                                 table.height)):
                for c in range(col, col + table.width):
                    bg_col = bg_cols[i % num_bg_cols] if bg_cols else None
                    style = _get_style(bold=None, bg_col=bg_col,
                                       border=table.style.border)
                    if (row + row_offset, c) in ws_styles:
                        style = style + ws_styles[(row + row_offset, c)]
                    ws_styles[(row + row_offset, c)] = style
        # per-column styles
        for col_name, col_style in table.column_styles.items():
            try:
                col_offset = table.get_column_offset(col_name)
            except KeyError:
                continue
            for i, r in enumerate(range(row + table.header_height,
                                        row + table.height)):
                style = col_style
                if (r, col + col_offset) in ws_styles:
                    style = ws_styles[(r, col + col_offset)] + style
                ws_styles[(r, col + col_offset)] = style
        # per-row styles
        for row_name, row_style in table.row_styles.items():
            try:
                row_offset = table.get_row_offset(row_name)
            except KeyError:
                continue
            for i, c in enumerate(range(col + table.row_labels_width,
                                        col + table.width)):
                style = row_style
                if (row + row_offset, c) in ws_styles:
                    style = ws_styles[(row + row_offset, c)] + style
                ws_styles[(row + row_offset, c)] = style
        # per-cell styles
        for (row_name, col_name), cell_style in table.cell_styles.items():
            try:
                col_offset = table.get_column_offset(col_name)
                row_offset = table.get_row_offset(row_name)
            except KeyError:
                continue
            style = cell_style
            if (row + row_offset, col + col_offset) in ws_styles:
                style = ws_styles[(row + row_offset, col + col_offset)] + style
            ws_styles[(row + row_offset, col + col_offset)] = style
    # explicit values that carry their own style
    for (row, col), value in self.__values.items():
        if isinstance(value, Value):
            style = value.style
            if style:
                if (row, col) in ws_styles:
                    style = style + ws_styles[(row, col)]
                ws_styles[(row, col)] = style
    return ws_styles
return a dictionary of {(row, col) -> CellStyle} for all cells that use a non-default style.
386,744
def string_length(ctx, s=None):
    if s is None:
        s = ctx.node
    elif callable(s):
        s = next(s.compute(ctx), '')
    yield len(s)
Yields one number
386,745
def get_message_definitions(self, msgid_or_symbol: str) -> list:
    if msgid_or_symbol[1:].isdigit():
        msgid_or_symbol = msgid_or_symbol.upper()
    for source in (self._alternative_names, self._messages_definitions):
        try:
            return [source[msgid_or_symbol]]
        except KeyError:
            pass
    error_msg = "No such message id or symbol '{msgid_or_symbol}'.".format(
        msgid_or_symbol=msgid_or_symbol
    )
    raise UnknownMessageError(error_msg)
Returns the Message object for this message. :param str msgid_or_symbol: msgid_or_symbol may be either a numeric or symbolic id. :raises UnknownMessageError: if the message id is not defined. :rtype: List of MessageDefinition :return: A message definition corresponding to msgid_or_symbol
386,746
def ret(eqdata, **kwargs):
    # The kwarg key and default were lost in extraction; the docstring below
    # says the output column defaults to 'Return'.
    if 'outputcol' not in kwargs:
        kwargs['outputcol'] = 'Return'
    result = growth(eqdata, **kwargs)
    result.values[:, :] -= 1.
    return result
Generate a DataFrame where the sole column, 'Return', is the return for the equity over the given number of sessions. For example, if 'XYZ' has 'Adj Close' of `100.0` on 2014-12-15 and `90.0` 4 *sessions* later on 2014-12-19, then the 'Return' value for 2014-12-19 will be `-0.1`. Parameters ---------- eqdata : DataFrame Data such as that returned by `get()` selection : str, optional Column from which to determine growth values. Defaults to 'Adj Close'. n_sessions : int Number of sessions to count back for calculating today's return. For example, if `n_sessions` is set to 4, return is calculated relative to the price 4 sessions ago. Defaults to 1 (price of previous session). skipstartrows : int Rows to skip at beginning of `eqdata` in addition to the 1 row that must be skipped because the calculation relies on a prior data point. Defaults to 0. skipendrows : int Rows to skip at end of `eqdata`. Defaults to 0. outputcol : str, optional Name for column of output dataframe. Defaults to 'Return'. Returns ---------- out : DataFrame See Also -------- :func:`growth` Notes ---------- The interval is the number of *sessions* between the 2 values whose ratio is being measured, *not* the number of days (which includes days on which the market is closed). The percentage gain or loss is measured relative to the earlier date, but the index date is the later date. The later date is chosen because that is the date on which the value is known. The percentage measure is used because that is how percent profit and loss are calculated.
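A minimal sketch of the behaviour described above; the price data is made up and `ret` is assumed importable from the surrounding module:

```python
import pandas as pd

eqdata = pd.DataFrame({'Adj Close': [100.0, 95.0, 90.0]},
                      index=pd.date_range('2014-12-15', periods=3))
# With the default n_sessions=1, the last 'Return' row is 90/95 - 1 ≈ -0.0526.
print(ret(eqdata))
```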
386,747
def scaled_pressure_send(self, time_boot_ms, press_abs, press_diff,
                         temperature, force_mavlink1=False):
    return self.send(
        self.scaled_pressure_encode(time_boot_ms, press_abs, press_diff,
                                    temperature),
        force_mavlink1=force_mavlink1)
The pressure readings for the typical setup of one absolute and differential pressure sensor. The units are as specified in each field. time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) press_abs : Absolute pressure (hectopascal) (float) press_diff : Differential pressure 1 (hectopascal) (float) temperature : Temperature measurement (0.01 degrees celsius) (int16_t)
386,748
def file_or_stderr(filename, *, mode="a", encoding="utf-8"):
    if filename is not None:
        return open(filename, mode, encoding=encoding)

    @contextmanager
    def stderr_wrapper():
        yield sys.stderr

    return stderr_wrapper()
Returns a context object wrapping either the given file or stderr (if filename is None). This makes dealing with log files more convenient.
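A usage sketch; the log file name is illustrative:

```python
import sys

with file_or_stderr(None) as stream:        # no file configured: stderr
    print("no log file set", file=stream)

with file_or_stderr("app.log") as stream:   # appends to app.log
    print("hello", file=stream)
```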
386,749
def get_requirements():
    requirements = []
    with open(
        os.path.join(BASE_DIRECTORY, 'requirements.txt'),
        'r', encoding='utf-8'
    ) as requirements_file:
        lines = requirements_file.readlines()
        for line in lines:
            requirements.append(line.strip())
    return requirements
Returns the content of 'requirements.txt' in a list. :return: The content of 'requirements.txt'. :rtype: list(str)
386,750
def validate(self, sources):
    if not isinstance(sources, Root):
        raise Exception("Source object expected")
    parameters = self.get_uri_with_missing_parameters(sources)
    for parameter in parameters:
        # The log template was lost in extraction; a plausible one restored.
        logging.getLogger().warn(
            'Missing parameter "%s" in method "%s" (version %s)'
            % (parameter["name"], parameter["method"], parameter["version"]))
Validate the format of sources
386,751
def _update_param(self):
    if not isinstance(self._gamma_update, type(None)):
        self._gamma = self._gamma_update(self._gamma)
    if not isinstance(self._lambda_update, type(None)):
        self._lambda_param = self._lambda_update(self._lambda_param)
r"""Update parameters This method updates the values of the algorthm parameters with the methods provided
386,752
def _evaluate(self, x, y):
    if _isscalar(x):
        x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)
        y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)
    else:
        x_pos = self.xSearchFunc(self.x_list, x)
        x_pos[x_pos < 1] = 1
        x_pos[x_pos > self.x_n - 1] = self.x_n - 1
        y_pos = self.ySearchFunc(self.y_list, y)
        y_pos[y_pos < 1] = 1
        y_pos[y_pos > self.y_n - 1] = self.y_n - 1
    alpha = (x - self.x_list[x_pos - 1]) / (self.x_list[x_pos]
                                            - self.x_list[x_pos - 1])
    beta = (y - self.y_list[y_pos - 1]) / (self.y_list[y_pos]
                                           - self.y_list[y_pos - 1])
    f = ((1 - alpha) * (1 - beta) * self.f_values[x_pos - 1, y_pos - 1]
         + (1 - alpha) * beta * self.f_values[x_pos - 1, y_pos]
         + alpha * (1 - beta) * self.f_values[x_pos, y_pos - 1]
         + alpha * beta * self.f_values[x_pos, y_pos])
    return f
Returns the level of the interpolated function at each value in x,y. Only called internally by HARKinterpolator2D.__call__ (etc).
386,753
def map_forecast_estimate(self):
    assert self.forecasts is not None
    islog = self.pst.parameter_data.partrans == "log"
    par_map = self.map_parameter_estimate
    par_map.loc[islog, :] = np.log10(par_map.loc[islog, :])
    par_map = Matrix.from_dataframe(par_map.loc[:, ["post_expt"]])
    posts, priors = [], []
    post_expt = (self.predictions.T * par_map).to_dataframe()
    for fname in self.forecast_names:
        pr = self.pst.res.loc[fname, "modelled"]
        priors.append(pr)
        posts.append(pr + post_expt.loc[fname, "post_expt"])
    return pd.DataFrame(data=np.array([priors, posts]).transpose(),
                        columns=["prior_expt", "post_expt"],
                        index=self.forecast_names)
get the prior and posterior forecast (prediction) expectations. Returns ------- pandas.DataFrame : pandas.DataFrame dataframe with prior and posterior forecast expected values
386,754
def resources(ctx, gpu):
    # The context keys and error templates were lost in extraction; restored
    # following the polyaxon-cli conventions.
    user, project_name, _job = get_job_or_local(ctx.obj.get('project'),
                                                ctx.obj.get('job'))
    try:
        message_handler = Printer.gpu_resources if gpu else Printer.resources
        PolyaxonClient().job.resources(user, project_name, _job,
                                       message_handler=message_handler)
    except (PolyaxonHTTPError, PolyaxonShouldExitError,
            PolyaxonClientException) as e:
        Printer.print_error('Could not get resources for job `{}`.'.format(_job))
        Printer.print_error('Error message `{}`.'.format(e))
        sys.exit(1)
Get job resources. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon job -j 2 resources ``` For GPU resources \b ```bash $ polyaxon job -j 2 resources --gpu ```
386,755
def cov(self, other, min_periods=None):
    this, other = self.align(other, join='inner', copy=False)
    if len(this) == 0:
        return np.nan
    return nanops.nancov(this.values, other.values,
                         min_periods=min_periods)
Compute covariance with Series, excluding missing values. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874
386,756
def string(prompt=None, empty=False):
    s = _prompt_input(prompt)
    if empty and not s:
        return None
    else:
        if s:
            return s
        else:
            return string(prompt=prompt, empty=empty)
Prompt a string. Parameters ---------- prompt : str, optional Use an alternative prompt. empty : bool, optional Allow an empty response. Returns ------- str or None A str if the user entered a non-empty string. None if the user pressed only Enter and ``empty`` was True.
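A usage sketch of the prompting behaviour described above:

```python
name = string(prompt="Your name: ")              # re-prompts until non-empty
nick = string(prompt="Nickname: ", empty=True)   # may return None on Enter
```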
386,757
def getDependents(self, retracted=False):
    def is_dependent(analysis):
        calculation = analysis.getCalculation()
        if not calculation:
            return False
        services = calculation.getRawDependentServices()
        if not services:
            return False
        query = dict(UID=services, getKeyword=self.getKeyword())
        services = api.search(query, "bika_setup_catalog")
        return len(services) > 0

    siblings = self.getSiblings(retracted=retracted)
    return filter(lambda sib: is_dependent(sib), siblings)
Returns a list of siblings who depend on us to calculate their result. :param retracted: If false, retracted/rejected dependents are dismissed :type retracted: bool :return: Analyses that depend on the current analysis :rtype: list of IAnalysis
386,758
def add_on_channel_close_callback(self):
    self._logger.info('Adding channel close callback')
    self._channel.add_on_close_callback(self.on_channel_closed)
Tell pika to call the on_channel_closed method if RabbitMQ unexpectedly closes the channel.
386,759
def ra_indices_for_traj(self, traj):
    assert not self.uniform_stride, \
        "requested random access indices, but is in uniform stride mode"
    if traj in self.traj_keys:
        return self.ra_indices_for_traj_dict[traj]
    else:
        return np.array([])
Gives the indices for a trajectory file index (without changing the order within the trajectory itself). :param traj: a trajectory file index :return: a Nx1 - np.array of the indices corresponding to the trajectory index
386,760
def get_a(name=None, ipv4addr=None, allow_array=True, **api_opts):
    # The key/object-type literals were lost in extraction; 'record:a' and
    # the data keys are inferred from the Infoblox WAPI conventions.
    data = {}
    if name:
        data['name'] = name
    if ipv4addr:
        data['ipv4addr'] = ipv4addr
    r = get_object('record:a', data=data, **api_opts)
    if r and len(r) > 1 and not allow_array:
        raise Exception('More than one result, use allow_array to return the data')
    return r
Get A record CLI Examples: .. code-block:: bash salt-call infoblox.get_a name=abc.example.com salt-call infoblox.get_a ipv4addr=192.168.3.5
386,761
def with_optimizer_tensor(self, tensor: Union[tf.Tensor, tf.Operation]) -> 'Optimization':
    # The quoted return annotation was lost in extraction; the builder
    # pattern implies the class's own name (assumed 'Optimization').
    self._optimizer_tensor = tensor
    return self
Replace optimizer tensor. :param tensor: Tensorflow tensor. :return: Optimization instance self reference.
386,762
def local_outgoing_hook(handler=None, coro=None):
    if handler is None:
        return lambda h: local_outgoing_hook(h, coro)
    if not hasattr(handler, "__call__"):
        raise TypeError("trace hooks must be callable")
    if coro is None:
        coro = compat.getcurrent()
    log.info("setting a coroutine local outgoing hook callback")
    state.local_from_hooks.setdefault(coro, []).append(
        weakref.ref(handler))
    return handler
add a callback to run every time a greenlet is switched away from :param handler: the callback function, must be a function taking 2 arguments: - an integer indicating whether it is being called as an incoming (1) hook or as an outgoing (2) hook (in this case it will always be 2). - the coroutine being switched from (in this case it is the one indicated by the ``coro`` argument to ``local_outgoing_hook``. Be aware that only a weak reference to this function will be held. :type handler: function :param coro: the coroutine for which to apply the trace hook (defaults to current) :type coro: greenlet
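A sketch of registering an outgoing hook on the current coroutine; the handler signature follows the description above, and a module-level function is used so the weak reference stays alive:

```python
@local_outgoing_hook
def on_switch_away(direction, coro):
    # direction is always 2 (outgoing) for this hook
    print("switching away from", coro)
```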
386,763
def _mark_html_fields_as_safe(self, page):
    page.title = mark_safe(page.title)
    page.content = mark_safe(page.content)
    return page
Mark the html content as safe so we don't have to use the safe template tag in all cms templates.
386,764
def marshal(self, v):
    if v:
        orig = [i for i in self.choices if self.choices[i] == v]
        if len(orig) == 1:
            return orig[0]
        elif len(orig) == 0:
            raise NotImplementedError(
                "No such reverse choice {0} for field {1}.".format(v, self))
        else:
            raise NotImplementedError(
                "Too many reverse choices {0} for value {1} for field {2}"
                .format(orig, v, self))
Turn this value into API format. Do a reverse dictionary lookup on choices to find the original value. If there are no keys or too many keys for now we raise a NotImplementedError as marshal is not used anywhere currently. In the future we will want to fail gracefully.
386,765
def format_table(table, column_names=None, column_specs=None,
                 max_col_width=32, auto_col_width=False):
    orig_col_args = dict(column_names=column_names, column_specs=column_specs)
    if len(table) > 0:
        col_widths = [0] * len(table[0])
    elif column_specs is not None:
        col_widths = [0] * (len(column_specs) + 1)
    elif column_names is not None:
        col_widths = [0] * len(column_names)
    my_col_names, id_column = [], None

    if column_specs is not None:
        column_names = ["Row"]
        column_names.extend([col["name"] for col in column_specs])
        column_specs = [{"name": "Row", "type": "float"}] + column_specs
    if column_names is not None:
        for i in range(len(column_names)):
            if column_names[i].lower() == "id":
                id_column = i
            my_col = ansi_truncate(str(column_names[i]),
                                   max_col_width if i not in {0, id_column} else 99)
            my_col_names.append(my_col)
            col_widths[i] = max(col_widths[i], len(strip_ansi_codes(my_col)))

    trunc_table = []
    for row in table:
        my_row = []
        for i in range(len(row)):
            my_item = ansi_truncate(str(row[i]),
                                    max_col_width if i not in {0, id_column} else 99)
            my_row.append(my_item)
            col_widths[i] = max(col_widths[i], len(strip_ansi_codes(my_item)))
        trunc_table.append(my_row)

    type_colormap = {"boolean": BLUE(),
                     "integer": YELLOW(),
                     "float": WHITE(),
                     "string": GREEN()}
    for i in "uint8", "int16", "uint16", "int32", "uint32", "int64":
        type_colormap[i] = type_colormap["integer"]
    type_colormap["double"] = type_colormap["float"]

    def col_head(i):
        if column_specs is not None:
            return BOLD() + type_colormap[column_specs[i]["type"]] + column_names[i] + ENDC()
        else:
            return BOLD() + WHITE() + column_names[i] + ENDC()

    formatted_table = [border("┌") + border("┬").join(border("─") * i for i in col_widths) + border("┐")]
    if len(my_col_names) > 0:
        padded_column_names = [col_head(i) + " " * (col_widths[i] - len(my_col_names[i]))
                               for i in range(len(my_col_names))]
        formatted_table.append(border("│") + border("│").join(padded_column_names) + border("│"))
        formatted_table.append(border("├") + border("┼").join(border("─") * i for i in col_widths) + border("┤"))
    for row in trunc_table:
        padded_row = [row[i] + " " * (col_widths[i] - len(strip_ansi_codes(row[i])))
                      for i in range(len(row))]
        formatted_table.append(border("│") + border("│").join(padded_row) + border("│"))
    formatted_table.append(border("└") + border("┴").join(border("─") * i for i in col_widths) + border("┘"))

    if auto_col_width:
        if not sys.stdout.isatty():
            raise AegeaException("Cannot auto-format table, output is not a terminal")
        table_width = len(strip_ansi_codes(formatted_table[0]))
        tty_cols, tty_rows = get_terminal_size()
        if table_width > max(tty_cols, 80):
            return format_table(table, max_col_width=max_col_width - 1,
                                auto_col_width=True, **orig_col_args)
    return "\n".join(formatted_table)
Table pretty printer. Expects tables to be given as arrays of arrays:: print(format_table([[1, "2"], [3, "456"]], column_names=['A', 'B']))
386,766
def to_filelink(self):
    # Status/verb literals and the CDN regex were lost in extraction; the
    # values below follow the Filestack API and should be treated as
    # inferred.
    if self.status != 'completed':
        return 'Conversion not complete!'
    response = utils.make_call(self.url, 'get')
    if response.ok:
        response = response.json()
        handle = re.match(
            r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)',
            response['data']['url']
        ).group(1)
        return filestack.models.Filelink(handle, apikey=self.apikey,
                                         security=self.security)
    raise Exception(response.text)
Checks is the status of the conversion is complete and, if so, converts to a Filelink *returns* [Filestack.Filelink] ```python filelink = av_convert.to_filelink() ```
386,767
def add_command_hooks(commands, srcdir='.'):
    # The hook regex and an inner helper's def line were lost in extraction;
    # both are restored following the astropy-helpers source layout.
    hook_re = re.compile(r'^(pre|post)_hook_(.+)$')

    def get_command_name(cmdcls):
        if hasattr(cmdcls, 'command_name'):
            return cmdcls.command_name
        else:
            return cmdcls.__name__

    packages = find_packages(srcdir)
    dist = get_dummy_distribution()
    hooks = collections.defaultdict(dict)
    for setuppkg in iter_setup_packages(srcdir, packages):
        for name, obj in vars(setuppkg).items():
            match = hook_re.match(name)
            if not match:
                continue
            hook_type = match.group(1)
            cmd_name = match.group(2)
            if hook_type not in hooks[cmd_name]:
                hooks[cmd_name][hook_type] = []
            hooks[cmd_name][hook_type].append((setuppkg.__name__, obj))
    for cmd_name, cmd_hooks in hooks.items():
        commands[cmd_name] = generate_hooked_command(
            cmd_name, dist.get_command_class(cmd_name), cmd_hooks)
Look through setup_package.py modules for functions with names like ``pre_<command_name>_hook`` and ``post_<command_name>_hook`` where ``<command_name>`` is the name of a ``setup.py`` command (e.g. build_ext). If either hook is present this adds a wrapped version of that command to the passed in ``commands`` `dict`. ``commands`` may be pre-populated with other custom distutils command classes that should be wrapped if there are hooks for them (e.g. `AstropyBuildPy`).
386,768
def random_sample(self, elements=('a', 'b', 'c'), length=None):
    # The default elements tuple was lost in extraction; ('a', 'b', 'c')
    # follows the Faker convention for the related random_* providers.
    return self.random_elements(elements, length, unique=True)
Returns a list of random unique elements for the specified length. Multiple occurrences of the same value increase its probability to be in the output.
386,769
def fit(self, X, y=None, **kwargs):
    if is_dataframe(X):
        self.X = X.values
        if self.features_ is None:
            self.features_ = X.columns
    else:
        self.X = X
    self.y = y
    super(MissingDataVisualizer, self).fit(X, y, **kwargs)
The fit method is the primary drawing input for the visualization since it has both the X and y data required for the viz and the transform method does not. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n An array or series of target or class values kwargs : dict Pass generic arguments to the drawing method Returns ------- self : instance Returns the instance of the transformer/visualizer
386,770
def tobinarray(self, start=None, end=None, pad=_DEPRECATED, size=None):
    if not isinstance(pad, _DeprecatedParam):
        # The parameter name in the message was lost in extraction; 'pad' is
        # the obvious candidate.
        print("IntelHex.tobinarray: 'pad' parameter is deprecated.")
        if pad is not None:
            print("Please, use IntelHex.padding attribute instead.")
        else:
            print("Please, don't pass it explicitly.")
            print("Use syntax like this: ih.tobinarray(start=xxx, end=yyy, size=zzz)")
    else:
        pad = None
    return self._tobinarray_really(start, end, pad, size)
Convert this object to binary form as array. If start and end unspecified, they will be inferred from the data. @param start start address of output bytes. @param end end address of output bytes (inclusive). @param pad [DEPRECATED PARAMETER, please use self.padding instead] fill empty spaces with this value (if pad is None then this method uses self.padding). @param size size of the block, used with start or end parameter. @return array of unsigned char data.
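A usage sketch against the IntelHex API as documented above; the HEX file name is illustrative:

```python
from intelhex import IntelHex

ih = IntelHex("firmware.hex")               # hypothetical input file
data = ih.tobinarray(start=0x0000, size=256)
print(len(data))                            # 256 unsigned bytes
```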
386,771
def clear_to_enc_filename(fname):
    # The extension literals were lost in extraction; '.json'/'.enc.json'
    # fit both the [:-4] slice and the docstring, but are an assumption.
    if not fname.lower().endswith('.json'):
        raise CredkeepException('Invalid filetype')
    if fname.lower().endswith('.enc.json'):
        raise CredkeepException('File already encrypted')
    enc_fname = fname[:-4] + 'enc.json'
    return enc_fname if exists(enc_fname) else None
Converts the filename of a cleartext file and convert it to an encrypted filename :param fname: :return: filename of encrypted secret file if found, else None
386,772
def confirm_email(self):
    if self._email and self.email_new:
        self._email = self.email_new
        self.email_confirmed = True
        self.email_link = None
        self.email_new = None
        self.email_link_expires = None
Confirm email
386,773
def set_digital_line_state(line_name, state):
    # The split literal was lost in extraction; 'line' recovers the line
    # number from names like "Dev1/port0/line3".
    bits_to_shift = int(line_name.split('line')[-1])
    dig_data = np.ones(2, dtype="uint32") * bool(state) * (2**bits_to_shift)
    DigitalOutputTask(line_name, dig_data).StartAndWait()
Set the state of a single digital line. line_name (str) - The physical name of the line. e.g line_name="Dev1/port0/line3" This should be a single digital line. Specifying more than one would result in unexpected behaviour. For example "Dev1/port0/line0:5" is not allowed. see http://zone.ni.com/reference/en-XX/help/370466W-01/mxcncpts/physchannames/ for details of naming lines. state (bool) - state=True sets the line to high, state=False sets to low.
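A usage sketch; the device and line names are illustrative:

```python
# Drive line 3 of port 0 on device Dev1 high, then low.
set_digital_line_state("Dev1/port0/line3", True)
set_digital_line_state("Dev1/port0/line3", False)
```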
386,774
def json_datetime_serial(obj):
    if isinstance(obj, (datetime, date)):
        serial = obj.isoformat()
        return serial
    if ObjectId is not None and isinstance(obj, ObjectId):
        return str(obj)
    raise TypeError("Type not serializable")
JSON serializer for objects not serializable by default json code
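A usage sketch showing how such a serializer plugs into `json.dumps` via the `default` hook:

```python
import json
from datetime import datetime

payload = {"created": datetime(2019, 1, 1, 12, 30)}
print(json.dumps(payload, default=json_datetime_serial))
# {"created": "2019-01-01T12:30:00"}
```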
386,775
def update(self, id, data):
    return json.loads(
        self._post(UrlEncoded(str(id)),
                   headers=KVStoreCollectionData.JSON_HEADER,
                   body=data).body.read().decode())
Replaces document with _id = id with data. :param id: _id of document to update :type id: ``string`` :param data: the new document to insert :type data: ``string`` :return: id of replaced document :rtype: ``dict``
386,776
def set_in(self, que_in, num_senders):
    for p in self.processes:
        p.set_in(que_in, num_senders)
Set the input queue and the number of parallel tasks that send inputs.
386,777
def write(self, offset, data):
    writefile = getattr(self, 'writefile', None)
    if writefile is None:
        return SFTP_OP_UNSUPPORTED
    try:
        # in append mode, the file position is managed by the OS
        if (self.__flags & os.O_APPEND) == 0:
            if self.__tell is None:
                self.__tell = writefile.tell()
            if offset != self.__tell:
                writefile.seek(offset)
                self.__tell = offset
        writefile.write(data)
        writefile.flush()
    except IOError as e:
        self.__tell = None
        return SFTPServer.convert_errno(e.errno)
    if self.__tell is not None:
        self.__tell += len(data)
    return SFTP_OK
Write C{data} into this file at position C{offset}. Extending the file past its original end is expected. Unlike python's normal C{write()} methods, this method cannot do a partial write: it must write all of C{data} or else return an error. The default implementation checks for an attribute on C{self} named C{writefile}, and if present, performs the write operation on the python file-like object found there. The attribute is named differently from C{readfile} to make it easy to implement read-only (or write-only) files, but if both attributes are present, they should refer to the same file. @param offset: position in the file to start reading from. @type offset: int or long @param data: data to write into the file. @type data: str @return: an SFTP error code like L{SFTP_OK}.
386,778
def _from_api_repr(cls, resource):
    job_id = resource.get("jobId")
    project = resource.get("projectId")
    location = resource.get("location")
    job_ref = cls(job_id, project, location)
    return job_ref
Returns a job reference for an API resource representation.
386,779
def metric_get(self, project, metric_name):
    path = "projects/%s/metrics/%s" % (project, metric_name)
    metric_pb = self._gapic_api.get_log_metric(path)
    return MessageToDict(metric_pb)
API call: retrieve a metric resource. :type project: str :param project: ID of the project containing the metric. :type metric_name: str :param metric_name: the name of the metric :rtype: dict :returns: The metric object returned from the API (converted from a protobuf to a dictionary).
386,780
def ph_supconj(b, orbit, solve_for=None, **kwargs):
    orbit_ps = _get_system_ps(b, orbit)
TODO: add documentation
386,781
def validation_statuses(self, area_uuid):
    path = "/area/{uuid}/validations".format(uuid=area_uuid)
    result = self._make_request('get', path)
    return result.json()
Get count of validation statuses for all files in upload_area :param str area_uuid: A RFC4122-compliant ID for the upload area :return: a dict with key for each state and value being the count of files in that state :rtype: dict :raises UploadApiException: if information could not be obtained
386,782
def commit(self):
    self.flush()
    if hasattr(self, 'transaction') and self.transaction.is_active:
        self.transaction.commit()
    elif hasattr(self, 'connection'):
        self.connection.commit()
Commit the transaction
386,783
def dump_table(self, table, drop_statement=True):
    # The SQL-template literals were lost in extraction; the statements
    # below follow common MySQL dump formatting and should be treated as
    # inferred.
    create_statement = self.get_table_definition(table)
    data = self.select_all(table)
    statements = ['\n',
                  sql_file_comment(''),
                  sql_file_comment('Table structure and data dump for {0}'.format(table)),
                  sql_file_comment('')]
    if drop_statement:
        statements.append('\nDROP TABLE IF EXISTS {0};'.format(wrap(table)))
    statements.append('{0};'.format(create_statement))
    if len(data) > 0:
        statements.append('{0};'.format(
            insert_statement(table, self.get_columns(table), data)))
    return '\n'.join(statements)
Export a table structure and data to SQL file for backup or later import.
386,784
def transform(self, path):
    if path is None or not path:
        return None
    obj_parent_modules = path.split(".")
    objects = [obj_parent_modules.pop(-1)]
    while True:
        try:
            parent_module_path = ".".join(obj_parent_modules)
            parent_module = importlib.import_module(parent_module_path)
            break
        except ImportError:
            if len(obj_parent_modules) == 1:
                raise ImportError("No module named '%s'" %
                                  obj_parent_modules[0])
            objects.insert(0, obj_parent_modules.pop(-1))
    current_object = parent_module
    for obj in objects:
        current_object = getattr(current_object, obj)
    return current_object
Transform a path into an actual Python object. The path can be arbitrary long. You can pass the path to a package, a module, a class, a function or a global variable, as deep as you want, as long as the deepest module is importable through ``importlib.import_module`` and each object is obtainable through the ``getattr`` method. Local objects will not work. Args: path (str): the dot-separated path of the object. Returns: object: the imported module or obtained object.
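A usage sketch of the dotted-path resolution described above; `transformer` stands in for whatever object exposes this method:

```python
dumps = transformer.transform("json.dumps")
assert callable(dumps)

join = transformer.transform("os.path.join")
print(join("a", "b"))   # a/b
```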
386,785
def stop(self):
    self.working = False
    for w in self.workers:
        w.join()
    self.workers = []
Stops the worker threads and waits for them to finish
386,786
async def reportCompleted(self, *args, **kwargs):
    return await self._makeApiCall(self.funcinfo["reportCompleted"],
                                   *args, **kwargs)
Report Run Completed Report a task completed, resolving the run as `completed`. This method gives output: ``v1/task-status-response.json#`` This method is ``stable``
386,787
def rvs(self, *args, **kwargs):
    size = kwargs.pop('size', 1)
    random_state = kwargs.pop('random_state', None)
    return self._kde.sample(n_samples=size, random_state=random_state)
Draw Random Variates. Parameters ---------- size: int, optional (default=1) random_state: optional (default=None)
386,788
def fixed_string(self, data=None):
    old = self.fixed
    if data != None:
        new = self._decode_input_string(data)
        if len(new) <= 16:
            self.fixed = new
        else:
            # The error message literal was lost in extraction; inferred.
            raise yubico_exception.InputError('The fixed string must be 0..16 bytes')
    return old
The fixed string is used to identify a particular Yubikey device. The fixed string is referred to as the 'Token Identifier' in OATH-HOTP mode. The length of the fixed string can be set between 0 and 16 bytes. Tip: This can also be used to extend the length of a static password.
386,789
def load_tiff(file):
    ndv, xsize, ysize, geot, projection, datatype = get_geo_info(file)
    data = gdalnumeric.LoadFile(file)
    data = np.ma.masked_array(data, mask=data == ndv, fill_value=ndv)
    return data
Load a geotiff raster keeping ndv values using a masked array Usage: data = load_tiff(file)
386,790
def add_markdown(self, markdown):
    if markdown is None:
        raise NullArgument()
    if not self.my_osid_object_form._is_valid_string(
            markdown, self.get_markdown_metadata()):
        raise InvalidArgument()
    self.my_osid_object_form._my_map['markdown'] = markdown
stub
386,791
def logToFile(path, level=logging.INFO):
    logger = logging.getLogger()
    logger.setLevel(level)
    # The format string was lost in extraction; a conventional one restored.
    formatter = logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    handler = logging.FileHandler(path)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
Create a log handler that logs to the given file.
386,792
def read_mm_uic2(fd, byte_order, dtype, count):
    # The dict keys and numpy dtype literal were lost in extraction;
    # restored from the tifffile MM_UIC2 reader (z-distance as a rational).
    result = {'number_planes': count}
    values = numpy.fromfile(fd, byte_order + 'I', 6 * count)
    result['z_distance'] = values[0::6] // values[1::6]
    return result
Read MM_UIC2 tag from file and return as dictionary.
386,793
def printProfile(self, reset=False):
    print "Profiling information for {}".format(type(self).__name__)
    totalTime = 0.000001
    for region in self.network.regions.values():
        timer = region.getComputeTimer()
        totalTime += timer.getElapsed()

    regionNames = list(self.network.regions.keys())
    regionNames.sort()

    count = 1
    profileInfo = []
    L2Time = 0.0
    L4Time = 0.0
    for regionName in regionNames:
        region = self.network.regions[regionName]
        timer = region.getComputeTimer()
        count = max(timer.getStartCount(), count)
        profileInfo.append([region.name,
                            timer.getStartCount(),
                            timer.getElapsed(),
                            100.0 * timer.getElapsed() / totalTime,
                            timer.getElapsed() / max(timer.getStartCount(), 1)])
        if "L2Column" in regionName:
            L2Time += timer.getElapsed()
        elif "L4Column" in regionName:
            L4Time += timer.getElapsed()

    profileInfo.append(["Total time", "", totalTime, "100.0",
                        totalTime / count])
    print tabulate(profileInfo,
                   headers=["Region", "Count", "Elapsed", "Pct of total",
                            "Secs/iteration"],
                   tablefmt="grid", floatfmt="6.3f")
    print
    print "Total time in L2 =", L2Time
    print "Total time in L4 =", L4Time
    if reset:
        self.resetProfile()
Prints profiling information. Parameters: ---------------------------- @param reset (bool) If set to True, the profiling will be reset.
386,794
def setup_sft_obs(sft_file, ins_file=None, start_datetime=None, times=None,
                  ncomp=1):
    # The file-mode and separator literals were lost in extraction; 'w',
    # ',' and the 'd' time unit are inferred.
    df = pd.read_csv(sft_file, skiprows=1, delim_whitespace=True)
    df.columns = [c.lower().replace("-", "_") for c in df.columns]
    if times is None:
        times = df.time.unique()
    missing = []
    utimes = df.time.unique()
    for t in times:
        if t not in utimes:
            missing.append(str(t))
    if len(missing) > 0:
        print(df.time)
        raise Exception("the following times are missing:{0}".format(
            ','.join(missing)))
    with open("sft_obs.config", 'w') as f:
        f.write(sft_file + '\n')
        [f.write("{0:15.6E}\n".format(t)) for t in times]
    df = apply_sft_obs()
    utimes = df.time.unique()
    for t in times:
        assert t in utimes, "time {0} missing in processed dataframe".format(t)
    idx = df.time.apply(lambda x: x in times)
    if start_datetime is not None:
        start_datetime = pd.to_datetime(start_datetime)
        df.loc[:, "time_str"] = pd.to_timedelta(df.time, unit='d') + start_datetime
        df.loc[:, "time_str"] = df.time_str.apply(
            lambda x: datetime.strftime(x, "%Y%m%d"))
    else:
        df.loc[:, "time_str"] = df.time.apply(lambda x: "{0:08.2f}".format(x))
    df.loc[:, "ins_str"] = "l1\n"
    df_times = df.loc[idx, :]
    df.loc[:, "icomp"] = 1
    icomp_idx = list(df.columns).index("icomp")
    for t in times:
        df_time = df.loc[df.time == t, :]
        vc = df_time.sfr_node.value_counts()
        ncomp = vc.max()
        assert np.all(vc.values == ncomp)
        nstrm = df_time.shape[0] / ncomp
        for icomp in range(ncomp):
            s = int(nstrm * (icomp))
            e = int(nstrm * (icomp + 1))
            idxs = df_time.iloc[s:e, :].index
            df_time.loc[idxs, "icomp"] = int(icomp + 1)
        df.loc[df_time.index, "ins_str"] = df_time.apply(
            lambda x: "l1 w w !sfrc{0}_{1}_{2}! !swgw{0}_{1}_{2}! "
                      "!gwcn{0}_{1}_{2}!\n".format(
                          x.sfr_node, x.icomp, x.time_str), axis=1)
    df.index = np.arange(df.shape[0])
    if ins_file is None:
        ins_file = sft_file + ".processed.ins"
    with open(ins_file, 'w') as f:
        f.write("pif ~\nl1\n")
        [f.write(i) for i in df.ins_str]
    df = try_process_ins_file(ins_file, sft_file + ".processed")
    if df is not None:
        return df
    else:
        return None
writes an instruction file for a mt3d-usgs sft output file Parameters ---------- sft_file : str the sft output file (ASCII) ins_file : str the name of the instruction file to create. If None, the name is <sft_file>.ins. Default is None start_datetime : str a pandas.to_datetime() compatible str. If not None, then the resulting observation names have the datetime suffix. If None, the suffix is the output totim. Default is None times : iterable a container of times to make observations for. If None, all times are used. Default is None. ncomp : int number of components in transport model. Default is 1. Returns ------- df : pandas.DataFrame a dataframe with obsnme and obsval for the sft simulated concentrations and flows. If inschek was not successfully run, then returns None Note ---- sets up observations for SW conc, GW conc and flowgw for all times and reaches.
386,795
def _init_map(self):
    DecimalAnswerFormRecord._init_map(self)
    DecimalValuesFormRecord._init_map(self)
    TextAnswerFormRecord._init_map(self)
    TextsFormRecord._init_map(self)
    super(edXNumericResponseAnswerFormRecord, self)._init_map()
call these all manually because non-cooperative
386,796
def close(self):
    try:
        self.ssh.close()
        self.logger.debug("close connect succeed.")
    except paramiko.SSHException as e:
        self.unknown("close connect error: %s" % e)
Close and exit the connection.
386,797
def createBlocksFromHTML(cls, html, encoding='utf-8'):
    parser = cls(encoding=encoding)
    parser.parseStr(html)
    rootNode = parser.getRoot()
    rootNode.remove()
    return rootNode.blocks
createBlocksFromHTML - Returns the root level node (unless multiple nodes), and a list of "blocks" added (text and nodes). @return list< str/AdvancedTag > - List of blocks created. May be strings (text nodes) or AdvancedTag (tags) NOTE: Results may be checked by: issubclass(block.__class__, AdvancedTag) If True, block is a tag, otherwise, it is a text node
386,798
def _start(self, update_cmd):
    try:
        self._poll(update_cmd)
    except BadStatus:
        self._operation.status = 'Failed'  # status literal lost in extraction
        self._exception = CloudError(self._response)
    except BadResponse as err:
        self._operation.status = 'Failed'
        self._exception = CloudError(self._response, str(err))
    except OperationFailed:
        self._exception = CloudError(self._response)
    except Exception as err:
        self._exception = err
    finally:
        self._done.set()
    callbacks, self._callbacks = self._callbacks, []
    while callbacks:
        for call in callbacks:
            call(self._operation)
        callbacks, self._callbacks = self._callbacks, []
Start the long running operation. On completion, runs any callbacks. :param callable update_cmd: The API request to check the status of the operation.
386,799
def close(self):
    # The error message literal was lost in extraction; inferred.
    if self._closed:
        raise ProgrammingError('the cursor is already closed')
    if self._id is not None:
        self._connection._client.close_statement(self._connection._id, self._id)
        self._id = None
    self._signature = None
    self._column_data_types = []
    self._frame = None
    self._pos = None
    self._closed = True
Closes the cursor. No further operations are allowed once the cursor is closed. If the cursor is used in a ``with`` statement, this method will be automatically called at the end of the ``with`` block.