Dataset columns: Unnamed: 0 (int64, range 0 to 389k) | code (string, lengths 26 to 79.6k) | docstring (string, lengths 1 to 46.9k)
9,500
def partition(list_, columns=2):
    iter_ = iter(list_)
    columns = int(columns)
    rows = []
    while True:
        row = []
        for column_number in range(1, columns + 1):
            try:
                value = six.next(iter_)
            except StopIteration:
                pass
            else:
                row.append(value)
        if not row:
            return rows
        rows.append(row)
Break a list into ``columns`` number of columns.
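A hypothetical usage sketch of partition(); the output shape is inferred from the code above (assumes six is installed).

import six  # used by partition() for six.next()

print(partition([1, 2, 3, 4, 5], columns=2))
# -> [[1, 2], [3, 4], [5]]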
9,501
def stream(self, area_code=values.unset, contains=values.unset,
           sms_enabled=values.unset, mms_enabled=values.unset,
           voice_enabled=values.unset, exclude_all_address_required=values.unset,
           exclude_local_address_required=values.unset,
           exclude_foreign_address_required=values.unset, beta=values.unset,
           near_number=values.unset, near_lat_long=values.unset,
           distance=values.unset, in_postal_code=values.unset,
           in_region=values.unset, in_rate_center=values.unset,
           in_lata=values.unset, in_locality=values.unset,
           fax_enabled=values.unset, limit=None, page_size=None):
    limits = self._version.read_limits(limit, page_size)
    page = self.page(
        area_code=area_code,
        contains=contains,
        sms_enabled=sms_enabled,
        mms_enabled=mms_enabled,
        voice_enabled=voice_enabled,
        exclude_all_address_required=exclude_all_address_required,
        exclude_local_address_required=exclude_local_address_required,
        exclude_foreign_address_required=exclude_foreign_address_required,
        beta=beta,
        near_number=near_number,
        near_lat_long=near_lat_long,
        distance=distance,
        in_postal_code=in_postal_code,
        in_region=in_region,
        in_rate_center=in_rate_center,
        in_lata=in_lata,
        in_locality=in_locality,
        fax_enabled=fax_enabled,
        page_size=limits['page_size'],
    )
    return self._version.stream(page, limits['limit'], limits['page_limit'])
Streams VoipInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param unicode area_code: The area code of the phone numbers to read :param unicode contains: The pattern on which to match phone numbers :param bool sms_enabled: Whether the phone numbers can receive text messages :param bool mms_enabled: Whether the phone numbers can receive MMS messages :param bool voice_enabled: Whether the phone numbers can receive calls. :param bool exclude_all_address_required: Whether to exclude phone numbers that require an Address :param bool exclude_local_address_required: Whether to exclude phone numbers that require a local address :param bool exclude_foreign_address_required: Whether to exclude phone numbers that require a foreign address :param bool beta: Whether to read phone numbers new to the Twilio platform :param unicode near_number: Given a phone number, find a geographically close number within distance miles. (US/Canada only) :param unicode near_lat_long: Given a latitude/longitude pair lat,long find geographically close numbers within distance miles. (US/Canada only) :param unicode distance: The search radius, in miles, for a near_ query. (US/Canada only) :param unicode in_postal_code: Limit results to a particular postal code. (US/Canada only) :param unicode in_region: Limit results to a particular region. (US/Canada only) :param unicode in_rate_center: Limit results to a specific rate center, or given a phone number search within the same rate center as that number. (US/Canada only) :param unicode in_lata: Limit results to a specific local access and transport area. (US/Canada only) :param unicode in_locality: Limit results to a particular locality :param bool fax_enabled: Whether the phone numbers can receive faxes :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.api.v2010.account.available_phone_number.voip.VoipInstance]
9,502
def _fmt_float(cls, x, **kw):
    n = kw.get('n')
    return '%.*f' % (n, cls._to_float(x))
Float formatting class-method. - the `x` parameter is ignored; instead, the kw-argument `f` (i.e. `x` converted to float) is used. - precision is taken from the `n` kw-argument.
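The reconstructed code above relies on Python's star-precision formatting; a minimal standalone demonstration:

n = 3
print('%.*f' % (n, 2.718281828))  # -> '2.718'; precision supplied at runtime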
9,503
def _api_get(self, url, **kwargs):
    kwargs['url'] = self.url + url
    kwargs['auth'] = self.auth
    headers = deepcopy(self.headers)
    headers.update(kwargs.get('headers', {}))
    kwargs['headers'] = headers
    return self._get(**kwargs)
A convenience wrapper for _get. Adds headers, auth and base url by default
9,504
def remove_aspera_coordinator(self, transfer_coordinator): if self._in_waiting_queue(transfer_coordinator): logger.info("Remove from waiting queue count=%d" % self.waiting_coordinator_count()) with self._lockw: self._waiting_transfer_coordinators.remove(transfer_coordinator) else: logger.info("Remove from processing queue count=%d" % self.tracked_coordinator_count()) try: self.remove_transfer_coordinator(transfer_coordinator) self.append_processed_queue(transfer_coordinator) except Exception: pass self._wakeup_processing_thread()
Remove the entry from the waiting queue, or remove the item from the processing queue and add it to the processed queue. Notify the background thread, as it may be able to process waiting requests.
9,505
def train_associations_SingleSNP(X, Y, U, S, C, numintervals, ldeltamin, ldeltamax): return _core.train_associations_SingleSNP(X, Y, U, S, C, numintervals, ldeltamin, ldeltamax)
train_associations_SingleSNP(MatrixXd const & X, MatrixXd const & Y, MatrixXd const & U, MatrixXd const & S, MatrixXd const & C, int numintervals, double ldeltamin, double ldeltamax) Parameters ---------- X: MatrixXd const & Y: MatrixXd const & U: MatrixXd const & S: MatrixXd const & C: MatrixXd const & numintervals: int ldeltamin: double ldeltamax: double
9,506
def add_layer_timing_signal_sinusoid_1d(x, layer, num_layers): channels = common_layers.shape_list(x)[-1] signal = get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers) return x + signal
Add sinusoids of different frequencies as layer (vertical) timing signal. Args: x: a Tensor with shape [batch, length, channels] layer: layer num num_layers: total number of layers Returns: a Tensor the same shape as x.
9,507
def ws010c(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float for field `ws010c`'.format(value))
    self._ws010c = value
Corresponds to IDD Field `ws010c` Wind speed corresponding to 1.0% cumulative frequency of occurrence for coldest month; Args: value (float): value for IDD Field `ws010c` Unit: m/s if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
9,508
def is_valid_int_param(param): if param is None: return False try: param = int(param) if param < 0: return False except (TypeError, ValueError): return False return True
Checks whether the parameter is a valid integer value. :param param: Value to be validated. :return: True if the parameter is a valid integer value, or False otherwise.
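Hypothetical checks mirroring the validation rules in the code above:

assert is_valid_int_param("42") is True    # numeric string is accepted
assert is_valid_int_param(-1) is False     # negatives are rejected
assert is_valid_int_param(None) is False
assert is_valid_int_param("abc") is False  # ValueError -> False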
9,509
def _init_bins(self, binset):
    # log/exception messages reconstructed; originals lost in extraction
    if binset is None:
        if self.bandpass.waveset is not None:
            self._binset = self.bandpass.waveset
        elif self.spectrum.waveset is not None:
            self._binset = self.spectrum.waveset
            log.info('Bandpass waveset is undefined; '
                     'binset is set to source spectrum waveset.')
        else:
            raise exceptions.UndefinedBinset(
                'Both source spectrum and bandpass have undefined waveset; '
                'cannot define binset.')
    else:
        self._binset = self._validate_wavelengths(binset)

    if self._binset[0] > self._binset[-1]:
        self._binset = self._binset[::-1]

    self._bin_edges = binning.calculate_bin_edges(self._binset)

    spwave = utils.merge_wavelengths(
        self._bin_edges.value, self._binset.value)
    if self.waveset is not None:
        spwave = utils.merge_wavelengths(spwave, self.waveset.value)
    spwave = spwave[spwave > 0]

    indices = np.searchsorted(spwave, self._bin_edges.value)
    i_beg = indices[:-1]
    i_end = indices[1:]

    flux = self(spwave)
    avflux = (flux.value[1:] + flux.value[:-1]) * 0.5
    deltaw = spwave[1:] - spwave[:-1]

    binflux, intwave = binning.calcbinflux(
        self._binset.size, i_beg, i_end, avflux, deltaw)
    self._binflux = binflux * flux.unit
Calculated binned wavelength centers, edges, and flux. By contrast, the native waveset and flux should be considered samples of a continuous function. Thus, it makes sense to interpolate ``self.waveset`` and ``self(self.waveset)``, but not `binset` and `binflux`.
9,510
def p_ComplianceModules(self, p):
    n = len(p)
    if n == 3:
        p[0] = ('ComplianceModules', p[1][1] + [p[2]])
    elif n == 2:
        p[0] = ('ComplianceModules', [p[1]])
ComplianceModules : ComplianceModules ComplianceModule | ComplianceModule
9,511
def peek(self, n):
    return int.from_bytes(
        self.data[self.pos >> 3:self.pos + n + 7 >> 3],
        'little') >> (self.pos & 7) & (1 << n) - 1
Peek an n bit integer from the stream without updating the pointer. It is not an error to read beyond the end of the stream. >>> olleke.data[:2]==b'\x1b\x2e' and 0x2e1b==11803 True >>> olleke.peek(15) 11803 >>> hex(olleke.peek(32)) '0x2e1b'
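The indexing arithmetic can be checked with a tiny standalone snippet; the little-endian byte order is consistent with the doctest values above (b'\x1b\x2e' -> 0x2e1b).

data, pos, n = b'\x1b\x2e', 0, 15
value = int.from_bytes(data[pos >> 3:pos + n + 7 >> 3], 'little') >> (pos & 7) & (1 << n) - 1
print(value)  # -> 11803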
9,512
def websocket_url_for_server_url(url): if url.startswith("http:"): reprotocoled = "ws" + url[4:] elif url.startswith("https:"): reprotocoled = "wss" + url[5:] else: raise ValueError("URL has unknown protocol " + url) if reprotocoled.endswith("/"): return reprotocoled + "ws" else: return reprotocoled + "/ws"
Convert an ``http(s)`` URL for a Bokeh server websocket endpoint into the appropriate ``ws(s)`` URL Args: url (str): An ``http(s)`` URL Returns: str: The corresponding ``ws(s)`` URL ending in ``/ws`` Raises: ValueError: If the input URL is not of the proper form.
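Hypothetical usage of websocket_url_for_server_url(), following the two branches above:

print(websocket_url_for_server_url("http://localhost:5006"))
# -> ws://localhost:5006/ws
print(websocket_url_for_server_url("https://example.com/app/"))
# -> wss://example.com/app/ws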
9,513
def create_pool(self, name, raid_groups, description=None, **kwargs): return UnityPool.create(self._cli, name=name, description=description, raid_groups=raid_groups, **kwargs)
Create pool based on RaidGroupParameter. :param name: pool name :param raid_groups: a list of *RaidGroupParameter* :param description: pool description :param alert_threshold: Threshold at which the system will generate alerts about the free space in the pool, specified as a percentage. :param is_harvest_enabled: True - Enable pool harvesting for the pool. False - Disable pool harvesting for the pool. :param is_snap_harvest_enabled: True - Enable snapshot harvesting for the pool. False - Disable snapshot harvesting for the pool. :param pool_harvest_high_threshold: Pool used space high threshold at which the system will automatically starts to delete snapshots in the pool :param pool_harvest_low_threshold: Pool used space low threshold under which the system will automatically stop deletion of snapshots in the pool :param snap_harvest_high_threshold: Snapshot used space high threshold at which the system automatically starts to delete snapshots in the pool :param snap_harvest_low_threshold: Snapshot used space low threshold below which the system will stop automatically deleting snapshots in the pool :param is_fast_cache_enabled: True - FAST Cache will be enabled for this pool. False - FAST Cache will be disabled for this pool. :param is_fastvp_enabled: True - Enable scheduled data relocations for the pool. False - Disable scheduled data relocations for the pool. :param pool_type: StoragePoolTypeEnum.TRADITIONAL - Create traditional pool. StoragePoolTypeEnum.DYNAMIC - Create dynamic pool. (default)
9,514
def get_foreign_keys_in_altered_table(self, diff): foreign_keys = diff.from_table.get_foreign_keys() column_names = self.get_column_names_in_altered_table(diff) for key, constraint in foreign_keys.items(): changed = False local_columns = [] for column_name in constraint.get_local_columns(): normalized_column_name = column_name.lower() if normalized_column_name not in column_names: del foreign_keys[key] break else: local_columns.append(column_names[normalized_column_name]) if column_name != column_names[normalized_column_name]: changed = True if changed: pass return foreign_keys
:param diff: The table diff :type diff: eloquent.dbal.table_diff.TableDiff :rtype: list
9,515
def startproject(project_name): dst_path = os.path.join(os.getcwd(), project_name) start_init_info(dst_path) _mkdir_p(dst_path) os.chdir(dst_path) init_code(, _manage_admin_code) init_code(, _requirement_admin_code) init_code(, _config_sql_code) app_path = os.path.join(dst_path, ) _mkdir_p(app_path) os.chdir(app_path) init_code(, _models_admin_code) init_code(, _init_admin_code) css_path, templates_path = create_templates_static_files(app_path) os.chdir(css_path) init_code(, _auth_login_css_code) create_blueprint( app_path, , _views_blueprint_code % (, ), _forms_basic_code, templates_path ) auth_templates_path = create_blueprint( app_path, , _auth_views_code, _auth_forms_code, templates_path ) os.chdir(auth_templates_path) init_code(, _auth_login_html_code) admin_path = os.path.join(app_path, ) _mkdir_p(admin_path) os.chdir(admin_path) init_code(, ) init_code(, _admin_views_code) os.chdir(templates_path) admin_templates_path = os.path.join(templates_path, ) _mkdir_p(admin_templates_path) os.chdir(admin_templates_path) init_code(, _admin_index_html_code) init_code(, _admin_logout_html_code) init_done_info()
build a full status project
9,516
def res_obs_v_sim(pst, logger=None, filename=None, **kwargs):
    if logger is None:
        logger = Logger('Default_Loggger.log', echo=False)
    logger.log("plot res_obs_v_sim")
    if "ensemble" in kwargs:
        try:
            res = pst_utils.res_from_en(pst, kwargs["ensemble"])
        except:
            logger.statement("res_1to1: could not find ensemble file {0}".format(kwargs["ensemble"]))
    else:
        try:
            res = pst.res
        except:
            logger.lraise("res_phi_pie: pst.res is None, couldn't load residuals")
            # ... per-group figure setup and datetime parsing ('%y%m%d') lost in extraction ...
            ax.set_xlim(obs_g.datetime.min(), obs_g.datetime.max())
            ax.grid()
            ax.set_xlabel("datetime", labelpad=0.1)
            ax.set_title("{0}) group:{1}, {2} observations".
                         format(abet[ax_count], g, names.shape[0]), loc="left")
            ax_count += 1
            logger.log("plotting obs_v_sim for {0}".format(g))
    if axes is None:
        return
    for a in range(ax_count, nr * nc):
        axes[a].set_axis_off()
        axes[a].set_yticks([])
        axes[a].set_xticks([])
    plt.tight_layout()
    figs.append(fig)
    if filename is not None:
        with PdfPages(pst.filename.replace(".pst", ".obs_v_sim.pdf")) as pdf:
            for fig in figs:
                pdf.savefig(fig)
                plt.close(fig)
        logger.log("plot res_obs_v_sim")
    else:
        logger.log("plot res_obs_v_sim")
        return figs
timeseries plot helper...in progress
9,517
def display_for_value(value, request=None):
    from is_core.utils.compatibility import admin_display_for_value

    if request and isinstance(value, Model):
        return render_model_object_with_link(request, value)
    else:
        return (
            (value and ugettext('Yes') or ugettext('No'))
            if isinstance(value, bool)
            else admin_display_for_value(value)
        )
Converts a value to a humanized form. Examples: boolean True/False ==> Yes/No; objects ==> object display name, with a link if the current user has permission to see the object; datetime ==> localized format
9,518
def min_value(self): if self._is_completely_masked: return np.nan * self._data_unit else: return np.min(self.values)
The minimum pixel value of the ``data`` within the source segment.
9,519
def dshield_ip_check(ip):
    if not is_IPv4Address(ip):
        return None

    headers = {'User-Agent': useragent}
    url = 'https://isc.sans.edu/api/ip/'
    response = requests.get('{0}{1}?json'.format(url, ip), headers=headers)
    return response.json()
Checks dshield for info on an IP address
9,520
def remove_binding(site, hostheader='', ipaddress='*', port=80):
    name = _get_binding_info(hostheader, ipaddress, port)
    current_bindings = list_bindings(site)

    if name not in current_bindings:
        log.debug('Binding already absent: %s', name)
        return True

    ps_cmd = ['Remove-WebBinding',
              '-HostHeader', "'{0}'".format(hostheader),
              '-IpAddress', "'{0}'".format(ipaddress),
              '-Port', "'{0}'".format(port)]

    cmd_ret = _srvmgr(ps_cmd)

    if cmd_ret['retcode'] != 0:
        msg = 'Unable to remove binding: {0}\nError: {1}' \
              ''.format(site, cmd_ret['stderr'])
        raise CommandExecutionError(msg)

    if name not in list_bindings(site):
        log.debug('Binding removed successfully: %s', site)
        return True

    log.error('Unable to remove binding: %s', site)
    return False
Remove an IIS binding. Args: site (str): The IIS site name. hostheader (str): The host header of the binding. ipaddress (str): The IP address of the binding. port (int): The TCP port of the binding. Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.remove_binding site='site0' hostheader='example.com' ipaddress='*' port='80'
9,521
def leaveEvent( self, event ): item = self.trackerItem() if ( item ): item.setVisible(False)
Toggles the display for the tracker item.
9,522
def _authenticate_cram_md5(credentials, sock_info):
    source = credentials.source
    username = credentials.username
    password = credentials.password
    passwd = _password_digest(username, password)
    cmd = SON([('saslStart', 1),
               ('mechanism', 'CRAM-MD5'),
               ('payload', Binary(b'')),
               ('autoAuthorize', 1)])
    response = sock_info.command(source, cmd)
    mac = hmac.HMAC(key=passwd.encode('utf-8'), digestmod=md5)
    mac.update(response['payload'])
    challenge = username.encode('utf-8') + b' ' + b(mac.hexdigest())
    cmd = SON([('saslContinue', 1),
               ('conversationId', response['conversationId']),
               ('payload', Binary(challenge))])
    sock_info.command(source, cmd)
Authenticate using CRAM-MD5 (RFC 2195)
9,523
def _grouper(iterable, n_args, fillvalue=None): args = [iter(iterable)] * n_args return zip_longest(*args, fillvalue=fillvalue)
Collect data from ``iterable`` into fixed-length chunks of ``n_args`` items; the last chunk is padded with ``fillvalue``.
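Hypothetical usage of _grouper(); zip_longest pads the final chunk:

from itertools import zip_longest

print(list(_grouper("ABCDE", 2)))
# -> [('A', 'B'), ('C', 'D'), ('E', None)]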
9,524
def get(self, query, sort, page, size):
    urlkwargs = {
        'q': query,
        'sort': sort,
        'size': size,
    }

    communities = Community.filter_communities(query, sort)
    page = communities.paginate(page, size)
    links = default_links_pagination_factory(page, urlkwargs)
    # Link header format reconstructed; the exact template was lost in extraction
    links_headers = map(lambda key: ('link', 'ref="{0}" href="{1}"'.format(
        key, links[key])), links)

    return self.make_response(
        page,
        headers=links_headers,
        links_item_factory=default_links_item_factory,
        page=page,
        urlkwargs=urlkwargs,
        links_pagination_factory=default_links_pagination_factory,
    )
Get a list of all the communities. .. http:get:: /communities/(string:id) Returns a JSON list with all the communities. **Request**: .. sourcecode:: http GET /communities HTTP/1.1 Accept: application/json Content-Type: application/json Host: localhost:5000 :reqheader Content-Type: application/json **Response**: .. sourcecode:: http HTTP/1.0 200 OK Content-Length: 334 Content-Type: application/json [ { "id": "comm1" }, { "id": "comm2" } ] :resheader Content-Type: application/json :statuscode 200: no error
9,525
async def async_enqueue_sync(self, func, *func_args): worker = self.pick_sticky(0) args = (func,) + func_args await worker.enqueue(enums.Task.FUNC, args)
Enqueue an arbitrary synchronous function.
9,526
def _list_templates(settings): for idx, option in enumerate(settings.config.get("project_templates"), start=1): puts(" {0!s:5} {1!s:36}".format( colored.yellow("[{0}]".format(idx)), colored.cyan(option.get("name")) )) if option.get("url"): puts(" {0}\n".format(option.get("url")))
List templates from settings.
9,527
def output_selector_schema(config_cls):
    config_type = resolve_config_cls_arg(config_cls)
    check.param_invariant(config_type.is_selector, 'config_cls')

    def _wrap(func):
        def _selector(context, config_value, runtime_value):
            selector_key, selector_value = single_item(config_value)
            return func(context, selector_key, selector_value, runtime_value)

        return _create_output_schema(config_type, _selector)

    return _wrap
A decorator for a annotating a function that can take the selected properties of a ``config_value`` and an instance of a custom type and materialize it. Args: config_cls (Selector):
9,528
def _join_all_filenames_and_text(self):
    self.log.info('starting the ``_join_all_filenames_and_text`` method')

    contentString = u""
    for i in self.directoryContents:
        contentString += u"%(i)s\n" % locals()
        if os.path.isfile(os.path.join(i)):
            if i[-4:] in [".png", ".jpg", ".gif"]:
                continue
            readFile = codecs.open(i, encoding='utf-8', mode='r')
            if ".DS_Store" in i:
                continue
            data = readFile.read()
            contentString += u"%(data)s\n" % locals()
            readFile.close()

    self.contentString = contentString

    self.log.info('completed the ``_join_all_filenames_and_text`` method')
    return None
*join all file names, directory names and text content together*
9,529
def predict_encoding(file_path, n_lines=20):
    import chardet
    with open(file_path, 'rb') as f:
        rawdata = b''.join([f.readline() for _ in range(n_lines)])
    return chardet.detect(rawdata)['encoding']
Get file encoding of a text file
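Hypothetical usage: sample the first lines of a file and guess its encoding ('example.txt' is a placeholder path).

enc = predict_encoding('example.txt', n_lines=50)
print(enc)  # e.g. 'utf-8' or 'ISO-8859-1'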
9,530
def pdf_rotate( input: str, counter_clockwise: bool = False, pages: [str] = None, output: str = None, ): infile = open(input, "rb") reader = PdfFileReader(infile) writer = PdfFileWriter() if pages is None: source_pages = reader.pages else: pages = parse_rangearg(pages, len(reader.pages)) source_pages = [reader.getPage(i) for i in pages] for i, page in enumerate(source_pages): if pages is None or i in pages: if counter_clockwise: writer.addPage(page.rotateCounterClockwise(90)) else: writer.addPage(page.rotateClockwise(90)) else: writer.addPage(page) if output is None: outfile = NamedTemporaryFile(delete=False) else: if not os.path.isfile(output) or overwrite_dlg(output): outfile = open(output, "wb") else: return writer.write(outfile) infile.close() outfile.close() if output is None: if overwrite_dlg(input): os.remove(input) move(outfile.name, input) else: os.remove(outfile.name)
Rotate the given PDF file clockwise or counter-clockwise. :param input: the PDF file :param counter_clockwise: rotate counter-clockwise if True, else clockwise :param pages: list of page numbers to rotate; if None, all pages will be rotated :param output: output file path; if None, the rotated file replaces the input
9,531
def _fmt_auto(cls, x, **kw): f = cls._to_float(x) if abs(f) > 1e8: fn = cls._fmt_exp else: if f - round(f) == 0: fn = cls._fmt_int else: fn = cls._fmt_float return fn(x, **kw)
auto formatting class-method.
9,532
def run_pipelines(pipeline_id_pattern, root_dir, use_cache=True, dirty=False,
                  force=False, concurrency=1, verbose_logs=True,
                  progress_cb=None, slave=False):
    # thread_name_prefix / queue_execution trigger / error-message strings
    # reconstructed; originals lost in extraction
    with concurrent.futures.ThreadPoolExecutor(
            max_workers=concurrency, thread_name_prefix='execute') as executor:
        try:
            results = []
            pending_futures = set()
            done_futures = set()
            finished_futures = []
            progress_thread = None
            progress_queue = None
            status_manager = status_mgr(root_dir)
            if progress_cb is not None:
                progress_queue = Queue()
                progress_thread = threading.Thread(target=progress_report_handler,
                                                   args=(progress_cb, progress_queue))
                progress_thread.start()

            all_specs = specs_to_execute(pipeline_id_pattern, root_dir,
                                         status_manager, force, dirty, results)
            while True:
                done = None
                if len(done_futures) > 0:
                    done = done_futures.pop()
                    finished_futures.append(done)
                    done = done.result()[0]
                try:
                    spec = all_specs.send(done)
                except StopIteration:
                    spec = None
                if spec is None:
                    if len(done_futures) == 0:
                        if len(pending_futures) > 0:
                            done_futures, pending_futures = \
                                concurrent.futures.wait(
                                    pending_futures,
                                    return_when=concurrent.futures.FIRST_COMPLETED)
                            continue
                        else:
                            break
                    else:
                        continue

                if len(spec.validation_errors) > 0:
                    results.append(
                        ExecutionResult(spec.pipeline_id, False, {},
                                        [] + list(map(str, spec.validation_errors)))
                    )
                    continue

                if slave:
                    ps = status_manager.get(spec.pipeline_id)
                    ps.init(spec.pipeline_details, spec.source_details,
                            spec.validation_errors, spec.cache_hash)
                    eid = gen_execution_id()
                    if ps.queue_execution(eid, 'manual'):
                        success, stats, errors = \
                            execute_pipeline(spec, eid, use_cache=use_cache)
                        results.append(ExecutionResult(
                            spec.pipeline_id, success, stats, errors))
                    else:
                        results.append(
                            ExecutionResult(spec.pipeline_id, False, None,
                                            ['Already running']))
                else:
                    f = executor.submit(remote_execute_pipeline, spec, root_dir,
                                        use_cache, verbose_logs, progress_queue)
                    pending_futures.add(f)

            for f in finished_futures:
                ret = f.result()
                results.append(ExecutionResult(*ret))
        except KeyboardInterrupt:
            pass
        finally:
            if slave:
                finalize()
            if progress_thread is not None:
                progress_queue.put(None)
                progress_thread.join()

    return results
Run a pipeline by pipeline-id. pipeline-id supports the '%' wildcard for any-suffix matching. Use 'all' or '%' for running all pipelines
9,533
def from_url(cls, db_url=ALL_SETS_ZIP_URL):
    r = requests.get(db_url)
    r.raise_for_status()

    if r.headers['content-type'] == 'application/json':
        return cls(json.loads(r.text))

    if r.headers['content-type'] == 'application/zip':
        with zipfile.ZipFile(six.BytesIO(r.content), 'r') as zf:
            names = zf.namelist()
            assert len(names) == 1, 'One datafile in ZIP'
            return cls.from_file(io.TextIOWrapper(
                zf.open(names[0]),
                encoding='utf8'))
Load card data from a URL. Uses :func:`requests.get` to fetch card data. Also handles zipfiles. :param db_url: URL to fetch. :return: A new :class:`~mtgjson.CardDb` instance.
9,534
def detach_framebuffer(self, screen_id, id_p): if not isinstance(screen_id, baseinteger): raise TypeError("screen_id can only be an instance of type baseinteger") if not isinstance(id_p, basestring): raise TypeError("id_p can only be an instance of type basestring") self._call("detachFramebuffer", in_p=[screen_id, id_p])
Removes the graphics updates target for a screen. in screen_id of type int in id_p of type str
9,535
def compute_evolution_by_frequency(
    df,
    id_cols: List[str],
    date_col: Union[str, Dict[str, str]],
    value_col: str,
    freq=1,
    method: str = 'abs',
    format: str = 'df',
    offseted_suffix: str = '_offseted',
    evolution_col_name: str = 'evolution_computed',
    missing_date_as_zero: bool = False,
    raise_duplicate_error: bool = True
):
    if missing_date_as_zero:
        how = 'outer'
        fillna = 0
    else:
        how = 'left'
        fillna = None

    return __compute_evolution(
        df=df,
        id_cols=id_cols,
        value_col=value_col,
        date_col=date_col,
        freq=freq,
        method=method,
        format=format,
        offseted_suffix=offseted_suffix,
        evolution_col_name=evolution_col_name,
        how=how,
        fillna=fillna,
        raise_duplicate_error=raise_duplicate_error
    )
This function answers the question: how has a value changed on a weekly, monthly, yearly basis ? --- ### Parameters *mandatory :* - `id_cols` (*list*): name of the columns used to create each group. - `date_col` (*str or dict*): either directly the name of the column containing the date or a dictionary with: - `selector` (*str*): the name of the column - `format` (*str*): the format of the date (see [pandas doc]( https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior)) - `value_col` (*str*): name of the column containing the value to compare. *optional :* - `freq` (*int/pd.DateOffset/pd.Serie/dict*): the frequency at which we calculate evolutions - `method` (*str*): either `"abs"` for absolute values or `"pct"` for the evolution in percentage of previous value. - `offseted_suffix` (*str*): suffix of the offseted column. By default, `"_offseted"`. - `evolution_col_name` (*str*): name given to the evolution column. By default, `"evolution_computed"`. - `missing_date_as_zero` (*boolean*): add missing date with zero value. - `raise_duplicate_error` (*boolean*): raise an error when the dataset has duplicated values with the given `id_cols`. - `format` (*str*): `'df'` # Do not change it !!! --- ### Example **Input** | id_cols | value_col | date_col| |:---------:|:------------:|:----------:| | A | 20 | 2010| | | 7 | 2011| | B | 200 | 2010| | | 220 | 2011| | C | 100 | 2011| ```cson compute_evolution_by_frequency: id_cols: "id_cols" date_col: "date_col" value_col: "value_col" ``` **Output** | id_cols | value_col | date_col| evolution| |:---------:|:------------:|:----------:|:---------:| | A | 20 | 2010| null| | | 7 | 2011| -13| | B | 200 | 2010| null| | | 220 | 2011| 20| | C | 100 | 2011| null|
9,536
def controller(self, *paths, **query_kwargs): kwargs = self._normalize_params(*paths, **query_kwargs) if self.controller_path: if "path" in kwargs: paths = self.normalize_paths(self.controller_path, kwargs["path"]) kwargs["path"] = "/".join(paths) else: kwargs["path"] = self.controller_path return self.create(self.root, **kwargs)
create a new url object using the controller path as a base if you have a controller `foo.BarController` then this would create a new Url instance with `host/foo/bar` as the base path, so any *paths will be appended to `/foo/bar` :example: # controller foo.BarController print url # http://host.com/foo/bar/some_random_path print url.controller() # http://host.com/foo/bar print url.controller("che", boom="bam") # http://host/foo/bar/che?boom=bam :param *paths: list, the paths to append to the controller path :param **query_kwargs: dict, any query string params to add
9,537
def table(self, name=DEFAULT_TABLE, **options):
    if name in self._table_cache:
        return self._table_cache[name]

    table_class = options.pop('table_class', self._cls_table)
    table = table_class(self._cls_storage_proxy(self._storage, name), name, **options)
    self._table_cache[name] = table

    return table
Get access to a specific table. Creates a new table, if it hasn't been created before, otherwise it returns the cached :class:`~tinydb.Table` object. :param name: The name of the table. :type name: str :param cache_size: How many query results to cache. :param table_class: Which table class to use.
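Hypothetical usage following the TinyDB pattern described above; repeated calls with the same name return the cached table object ('db.json' is a placeholder storage file).

from tinydb import TinyDB

db = TinyDB('db.json')
users = db.table('users', cache_size=30)
assert db.table('users') is users  # second call hits the table cache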
9,538
def tileAddress(self, zoom, point):
    "Returns a tile address based on a zoom level and \
    a point in the tile"
    [x, y] = point
    assert x <= self.MAXX and x >= self.MINX
    assert y <= self.MAXY and y >= self.MINY
    assert zoom in range(0, len(self.RESOLUTIONS))
    tileS = self.tileSize(zoom)
    offsetX = abs(x - self.MINX)
    if self.originCorner == 'bottom-left':
        offsetY = abs(y - self.MINY)
    elif self.originCorner == 'top-left':
        offsetY = abs(self.MAXY - y)
    col = offsetX / tileS
    row = offsetY / tileS
    # Points exactly on the right/top edge belong to the previous tile
    if x in (self.MINX, self.MAXX) and col.is_integer():
        col = max(0, col - 1)
    if y in (self.MINY, self.MAXY) and row.is_integer():
        row = max(0, row - 1)
    return [
        int(math.floor(col)),
        int(math.floor(row))
    ]
Returns a tile address based on a zoom level and \ a point in the tile
9,539
def return_hdr(self):
    self.fdtfile = None
    try:
        self.EEG = loadmat(str(self.filename), struct_as_record=False,
                           squeeze_me=True)['EEG']
        self.hdf5 = False
    except NotImplementedError:
        self.hdf5 = True

    if not self.hdf5:
        self.s_freq = self.EEG.srate
        chan_name = [chan.labels for chan in self.EEG.chanlocs]
        n_samples = self.EEG.pnts
        if isinstance(self.EEG.subject, str):
            subj_id = self.EEG.subject
        else:
            subj_id = ''
        try:
            start_time = datetime(*self.EEG.etc.T0)
        except AttributeError:
            start_time = DEFAULT_DATETIME
        if isinstance(self.EEG.datfile, str):
            self.fdtfile = self.EEG.datfile
        else:
            self.data = self.EEG.data
    else:
        with File(self.filename) as f:
            EEG = f['EEG']
            self.s_freq = EEG['srate'].value.item()
            chan_name = read_hdf5_chan_name(f, EEG['chanlocs']['labels'])
            n_samples = int(EEG['pnts'].value.item())
            subj_id = read_hdf5_str(EEG['subject'])
            try:
                start_time = datetime(*EEG['etc']['T0'])
            except ValueError:
                start_time = DEFAULT_DATETIME
            datfile = read_hdf5_str(EEG['datfile'])
            if datfile == '':
                self.data = EEG['data'].value.T
            else:
                self.fdtfile = datfile

    if self.fdtfile is not None:
        memshape = (len(chan_name), int(n_samples))
        memmap_file = self.filename.parent / self.fdtfile
        if not memmap_file.exists():
            renamed_memmap_file = self.filename.with_suffix('.fdt')
            if not renamed_memmap_file.exists():
                raise FileNotFoundError(f'could not find {memmap_file} or {renamed_memmap_file}')
            else:
                memmap_file = renamed_memmap_file
        # dtype/mode/order reconstructed; originals lost in extraction
        self.data = memmap(str(memmap_file), 'float32', mode='c',
                           shape=memshape, order='F')

    return subj_id, start_time, self.s_freq, chan_name, n_samples, {}
subj_id : str subject identification code start_time : datetime start time of the dataset s_freq : float sampling frequency chan_name : list of str list of all the channels n_samples : int number of samples in the dataset orig : dict additional information taken directly from the header
9,540
def accel_zoom_in(self, *args): for term in self.get_notebook().iter_terminals(): term.increase_font_size() return True
Callback to zoom in.
9,541
def _se_all(self): err = np.expand_dims(self._ms_err, axis=1) t1 = np.diagonal( np.linalg.inv(np.matmul(self.xwins.swapaxes(1, 2), self.xwins)), axis1=1, axis2=2, ) return np.squeeze(np.sqrt(t1 * err))
Standard errors (SE) for all parameters, including the intercept.
9,542
def _post_run_hook(self, runtime):
    outputs = self.aggregate_outputs(runtime=runtime)
    self._anat_file = os.path.join(outputs.subjects_dir,
                                   outputs.subject_id,
                                   'mri', 'brain.mgz')
    self._contour = os.path.join(outputs.subjects_dir,
                                 outputs.subject_id,
                                 'mri', 'ribbon.mgz')
    self._masked = False

    NIWORKFLOWS_LOG.info('Generating visual report for subject %s',
                         outputs.subject_id)

    return super(ReconAllRPT, self)._post_run_hook(runtime)
generates a report showing nine slices, three per axis, of an arbitrary volume of `in_files`, with the resulting segmentation overlaid
9,543
def _get_stddevs(self, C, stddev_types, num_sites):
    # coefficient keys 'tau' / 'phi' assumed (common OpenQuake convention);
    # the original key strings were lost in extraction
    sigma_inter = C['tau'] + np.zeros(num_sites)
    sigma_intra = C['phi'] + np.zeros(num_sites)
    std = []
    for stddev_type in stddev_types:
        if stddev_type == const.StdDev.TOTAL:
            std += [np.sqrt(sigma_intra**2 + sigma_inter**2)]
        elif stddev_type == const.StdDev.INTRA_EVENT:
            std.append(sigma_intra)
        elif stddev_type == const.StdDev.INTER_EVENT:
            std.append(sigma_inter)
    return std
Return the total standard deviation, as described in paragraph 5.2, page 200.
9,544
def docs(ctx, clean=False, browse=False, watch=False): if clean: clean_docs(ctx) if watch: watch_docs(ctx, browse=browse) else: build_docs(ctx, browse=browse)
Build the docs.
9,545
def elastic_install(self): with cd(): if not exists(): sudo(.format( bigdata_conf.elastic_download_url )) sudo() sudo()
elasticsearch install :return:
9,546
def match(self, metadata, user = None): assert isinstance(metadata, self.formatclass) return self.generate(metadata,user)
Does the specified metadata match this template? returns (success,metadata,parameters)
9,547
def configure_cache(app):
    log = logging.getLogger()
    log.debug('Configuring cache on the app context')
    if not getattr(app, '_cache', None):
        app._cache = {}
Sets up an attribute to cache data in the app context
9,548
def alias_action(self, *args, **kwargs):
    to = kwargs.pop('to', None)
    if not to:
        return
    error_message = ("You can't specify target ({}) as alias "
                     "because it is real action name".format(to))
    if to in list(itertools.chain(*self.aliased_actions.values())):
        raise Exception(error_message)
    self.aliased_actions.setdefault(to, []).extend(args)
Alias one or more actions into another one. self.alias_action('create', 'read', 'update', 'delete', to='crud')
9,549
def set_memory_params(self, ksm_interval=None, no_swap=None):
    # uWSGI option names 'ksm' and 'never-swap' assumed from the docstring
    self._set('ksm', ksm_interval)
    self._set('never-swap', no_swap, cast=bool)
    return self._section
Set memory related parameters. :param int ksm_interval: Kernel Samepage Merging frequency option, that can reduce memory usage. Accepts a number of requests (or master process cycles) to run page scanner after. .. note:: Linux only. * http://uwsgi.readthedocs.io/en/latest/KSM.html :param bool no_swap: Lock all memory pages avoiding swapping.
9,550
def create_folder(name, location='\\'):
    if name in list_folders(location):
        return '{0} already exists'.format(name)

    with salt.utils.winapi.Com():
        task_service = win32com.client.Dispatch("Schedule.Service")
        task_service.Connect()
        task_folder = task_service.GetFolder(location)
        task_folder.CreateFolder(name)

    if name in list_folders(location):
        return True
    else:
        return False
r''' Create a folder in which to create tasks. :param str name: The name of the folder. This will be displayed in the task scheduler. :param str location: A string value representing the location in which to create the folder. Default is '\\' which is the root for the task scheduler (C:\Windows\System32\tasks). :return: True if successful, False if unsuccessful :rtype: bool CLI Example: .. code-block:: bash salt 'minion-id' task.create_folder <folder_name>
9,551
def locate_ranges(self, starts, stops, strict=True): loc, found = self.locate_intersection_ranges(starts, stops) if strict and np.any(~found): raise KeyError(starts[~found], stops[~found]) return loc
Locate items within the given ranges. Parameters ---------- starts : array_like, int Range start values. stops : array_like, int Range stop values. strict : bool, optional If True, raise KeyError if any ranges contain no entries. Returns ------- loc : ndarray, bool Boolean array with location of entries found. Examples -------- >>> import allel >>> import numpy as np >>> idx = allel.SortedIndex([3, 6, 11, 20, 35]) >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35], ... [100, 120]]) >>> starts = ranges[:, 0] >>> stops = ranges[:, 1] >>> loc = idx.locate_ranges(starts, stops, strict=False) >>> loc array([False, True, True, False, True]) >>> idx[loc] <SortedIndex shape=(3,) dtype=int64> [6, 11, 35]
9,552
def forwards(apps, schema_editor): Work = apps.get_model(, ) for work in Work.objects.all(): if not work.slug: work.slug = generate_slug(work.pk) work.save()
Re-save all the Works because something earlier didn't create their slugs.
9,553
def _GetTable(self):
    result = []
    # Avoid the global lookup cost on each iteration.
    lstr = str
    for row in self._table:
        result.append('%s\n' % self.separator.join(lstr(v) for v in row))
    return ''.join(result)
Returns table, with column headers and separators. Returns: The whole table including headers as a string. Each row is joined by a newline and each entry by self.separator.
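A standalone sketch of the row-joining logic used above:

separator = ', '
table = [['Col1', 'Col2'], ['a', 1]]
print(''.join('%s\n' % separator.join(str(v) for v in row) for row in table))
# Col1, Col2
# a, 1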
9,554
def connection_made(self, transport):
    self.transport = transport
    self.remote_ip, self.port = transport.get_extra_info('peername')[:2]
    logging.debug(
        'connection made (address: {} port: {})'.format(self.remote_ip, self.port))
    self.future = self.send_package(
        protomap.CPROTO_REQ_INFO,
        data=None,
        timeout=10)
override _SiriDBProtocol
9,555
def compound_powerspec(data, tbin, Df=None, pointProcess=False): return powerspec([np.sum(data, axis=0)], tbin, Df=Df, units=True, pointProcess=pointProcess)
Calculate the power spectrum of the compound/sum signal. data is first summed across units, then the power spectrum is calculated. If pointProcess=True, power spectra are normalized by the length T of the time series. Parameters ---------- data : numpy.ndarray, 1st axis unit, 2nd axis time tbin : float, binsize in ms Df : float/None, window width of sliding rectangular filter (smoothing), None -> no smoothing pointProcess : bool, if set to True, powerspectrum is normalized to signal length T Returns ------- freq : tuple numpy.ndarray of frequencies POW : tuple 1 dim numpy.ndarray, frequency series Examples -------- >>> compound_powerspec(np.array([analog_sig1, analog_sig2]), tbin, Df=Df) Out[1]: (freq,POW) >>> POW.shape Out[2]: (len(analog_sig1),)
9,556
def l2traceroute_result_output_l2_hop_results_l2_hop_ingress_interface_name(self, **kwargs):
    config = ET.Element("config")
    l2traceroute_result = ET.Element("l2traceroute_result")
    config = l2traceroute_result
    output = ET.SubElement(l2traceroute_result, "output")
    l2_hop_results = ET.SubElement(output, "l2-hop-results")
    l2_hop = ET.SubElement(l2_hop_results, "l2-hop")
    ingress = ET.SubElement(l2_hop, "ingress")
    interface_name = ET.SubElement(ingress, "interface-name")
    interface_name.text = kwargs.pop('interface_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
9,557
async def writelines(self, lines, eof=False, buffering=True):
    for l in lines:
        await self.write(l, False, buffering)
    if eof:
        await self.write(b'', eof, buffering)
Write lines to current output stream
9,558
def __collapseLineOrCol(self, line, d): if (d == Board.LEFT or d == Board.UP): inc = 1 rg = xrange(0, self.__size-1, inc) else: inc = -1 rg = xrange(self.__size-1, 0, inc) pts = 0 for i in rg: if line[i] == 0: continue if line[i] == line[i+inc]: v = line[i]*2 if v == self.__goal: self.__won = True line[i] = v line[i+inc] = 0 pts += v return (line, pts)
Merge tiles in a line or column according to a direction and return a tuple with the new line and the score for the move on this line
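A standalone sketch of the left-collapse merge rule used above (a simplified single-pass variant, not the class method itself):

def collapse_left(line):
    pts = 0
    for i in range(len(line) - 1):
        if line[i] != 0 and line[i] == line[i + 1]:
            line[i] *= 2       # merge the pair into the left tile
            line[i + 1] = 0
            pts += line[i]     # score the merged value
    return line, pts

print(collapse_left([2, 2, 4, 4]))  # -> ([4, 0, 8, 0], 12)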
9,559
def render_in_page(request, template):
    from leonardo.module.web.models import Page

    page = request.leonardo_page if hasattr(
        request, 'leonardo_page') else Page.objects.filter(parent=None).first()

    if page:
        try:
            slug = request.path_info.split("/")[-2:-1][0]
        except KeyError:
            slug = None

        try:
            # context keys follow the FeinCMS/Leonardo pattern;
            # exact names reconstructed
            body = render_to_string(template, RequestContext(request, {
                'request_path': request.path,
                'feincms_page': page,
                'slug': slug,
                'standalone': True}))
            response = http.HttpResponseNotFound(
                body, content_type=CONTENT_TYPE)
        except TemplateDoesNotExist:
            response = False
        return response

    return False
return rendered template in standalone mode or ``False``
9,560
def _get_rules_from_aws(self):
    list_of_rules = list()
    if self.profile:
        boto3.setup_default_session(profile_name=self.profile)
    if self.region:
        ec2 = boto3.client('ec2', region_name=self.region)
    else:
        ec2 = boto3.client('ec2')

    security_groups = ec2.describe_security_groups(Filters=self.filters)

    for group in security_groups['SecurityGroups']:
        group_dict = dict()
        group_dict['name'] = group['GroupName']
        group_dict['id'] = group['GroupId']
        group_dict['vpc_id'] = group.get('VpcId', None)
        if (group.get('IpPermissions', None) or
                group.get('IpPermissionsEgress', None)):
            group_dict['rules'] = list()
            for rule in group.get('IpPermissions', None):
                rule_dict = self._build_rule(rule)
                rule_dict['direction'] = "INGRESS"
                group_dict['rules'].append(rule_dict)
            for rule in group.get('IpPermissionsEgress', None):
                rule_dict = self._build_rule(rule)
                rule_dict['direction'] = "EGRESS"
                group_dict['rules'].append(rule_dict)
        list_of_rules.append(group_dict)
    return list_of_rules
Load the EC2 security rules off AWS into a list of dict. Returns: list
9,561
def force_constants(self, force_constants): if type(force_constants) is np.ndarray: fc_shape = force_constants.shape if fc_shape[0] != fc_shape[1]: if self._primitive.get_number_of_atoms() != fc_shape[0]: msg = ("Force constants shape disagrees with crystal " "structure setting. This may be due to " "PRIMITIVE_AXIS.") raise RuntimeError(msg) self._force_constants = force_constants if self._primitive.get_masses() is not None: self._set_dynamical_matrix()
Set force constants Parameters ---------- force_constants : array_like Force constants matrix. If this is given in own condiguous ndarray with order='C' and dtype='double', internal copy of data is avoided. Therefore some computational resources are saved. shape=(atoms in supercell, atoms in supercell, 3, 3), dtype='double'
9,562
def scan(host, port=80, url=None, https=False, timeout=1, max_size=65535):
    starts = OrderedDict()
    ends = OrderedDict()
    port = int(port)
    result = dict(host=host, port=port, state='closed', durations=OrderedDict())
    if url:
        timeout = 1
        result['code'] = None

    starts['all'] = starts['dns'] = datetime.datetime.now()

    # DNS lookup
    try:
        hostip = socket.gethostbyname(host)
        result['ip'] = hostip
        ends['dns'] = datetime.datetime.now()
    except socket.gaierror:
        raise ScanFailed('DNS lookup failed', result=result)

    # TCP connect
    starts['connect'] = datetime.datetime.now()
    network_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    network_socket.settimeout(timeout)
    result_connection = network_socket.connect_ex((hostip, port))
    ends['connect'] = datetime.datetime.now()

    # SSL handshake
    if https:
        starts['ssl'] = datetime.datetime.now()
        try:
            network_socket = ssl.wrap_socket(network_socket)
        except socket.timeout:
            raise ScanFailed('SSL connection failed', result=result)
        ends['ssl'] = datetime.datetime.now()

    # HTTP GET request
    if result_connection == 0 and url:
        starts['request'] = datetime.datetime.now()
        network_socket.send(
            "GET {0} HTTP/1.0\r\nHost: {1}\r\n\r\n".format(
                url, host
            ).encode())
        if max_size:
            data = network_socket.recv(max_size)
        else:
            # recv size reconstructed; the original argument was lost in extraction
            data = network_socket.recv(65535)
        result['length'] = len(data)
        data = data.decode('ascii', errors='ignore')
        result['response'] = data
        try:
            result['code'] = int(data.split('\n')[0].split()[1])
        except IndexError:
            pass
        ends['request'] = datetime.datetime.now()
    network_socket.close()

    ends['all'] = datetime.datetime.now()
    for duration in starts.keys():
        if duration in ends.keys():
            result['durations'][duration] = ends[duration] - starts[duration]
    if result_connection == 0:
        result['state'] = 'open'

    return result
Scan a network port Parameters ---------- host : str Host or ip address to scan port : int, optional Port to scan, default=80 url : str, optional URL to perform get request to on the host and port specified https : bool, optional Perform ssl connection on the socket, default=False timeout : float Timeout for network operations, default=1 Returns ------- dict Result dictionary that contains the following keys: host - The host or IP address that was scanned port - The port number that was scanned state - The state of the port, will be either "open" or "closed" durations - An ordered dictionary with floating point value of the time elapsed for each connection operation Raises ------ ScanFailed - The scan operation failed
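Hypothetical usage of scan(); the keys read here are the ones named in the docstring above:

try:
    result = scan('example.com', port=443, url='/', https=True, timeout=2)
    print(result['state'], result['durations'].get('connect'))
except ScanFailed as e:
    print('scan failed:', e)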
9,563
def disableGroup(self): radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group] for radioButton in radioButtonListInGroup: radioButton.disable()
Disables all radio buttons in the group
9,564
def reqHeadTimeStamp( self, contract: Contract, whatToShow: str, useRTH: bool, formatDate: int = 1) -> datetime.datetime: return self._run( self.reqHeadTimeStampAsync( contract, whatToShow, useRTH, formatDate))
Get the datetime of earliest available historical data for the contract. Args: contract: Contract of interest. useRTH: If True then only show data from within Regular Trading Hours, if False then show all data. formatDate: If set to 2 then the result is returned as a timezone-aware datetime.datetime with UTC timezone.
9,565
def dump_np_vars(self, store_format='csv', delimiter=','):
    ret = False

    if self.system.files.no_output is True:
        logger.debug('no_output is True; skipped dumping simulation data')
        return True

    if self.write_lst() and self.write_np_dat(store_format=store_format,
                                              delimiter=delimiter):
        ret = True

    return ret
Dump the TDS simulation data to files by calling subroutines `write_lst` and `write_np_dat`. Parameters ----------- store_format : str dump format in `('csv', 'txt', 'hdf5')` delimiter : str delimiter for the `csv` and `txt` format Returns ------- bool: success flag
9,566
def _concat_translations(translations: List[Translation], stop_ids: Set[int], length_penalty: LengthPenalty, brevity_penalty: Optional[BrevityPenalty] = None) -> Translation: target_ids = [] attention_matrices = [] beam_histories = [] estimated_reference_length = None for idx, translation in enumerate(translations): if idx == len(translations) - 1: target_ids.extend(translation.target_ids) attention_matrices.append(translation.attention_matrix) else: if translation.target_ids[-1] in stop_ids: target_ids.extend(translation.target_ids[:-1]) attention_matrices.append(translation.attention_matrix[:-1, :]) else: target_ids.extend(translation.target_ids) attention_matrices.append(translation.attention_matrix) beam_histories.extend(translation.beam_histories) if translation.estimated_reference_length is not None: if estimated_reference_length is None: estimated_reference_length = translation.estimated_reference_length else: estimated_reference_length += translation.estimated_reference_length attention_shapes = [attention_matrix.shape for attention_matrix in attention_matrices] attention_matrix_combined = np.zeros(np.sum(np.asarray(attention_shapes), axis=0)) pos_t, pos_s = 0, 0 for attention_matrix, (len_t, len_s) in zip(attention_matrices, attention_shapes): attention_matrix_combined[pos_t:pos_t + len_t, pos_s:pos_s + len_s] = attention_matrix pos_t += len_t pos_s += len_s def _brevity_penalty(hypothesis_length, reference_length): return 0.0 if brevity_penalty is None else brevity_penalty.get(hypothesis_length, reference_length) score = sum((translation.score + _brevity_penalty(len(translation.target_ids), translation.estimated_reference_length)) \ * length_penalty.get(len(translation.target_ids)) for translation in translations) score = score / length_penalty.get(len(target_ids)) - _brevity_penalty(len(target_ids), estimated_reference_length) return Translation(target_ids, attention_matrix_combined, score, beam_histories, estimated_reference_length=estimated_reference_length)
Combines translations through concatenation. :param translations: A list of translations (sequence starting with BOS symbol, attention_matrix), score and length. :param stop_ids: The EOS symbols. :param length_penalty: Instance of the LengthPenalty class initialized with alpha and beta. :param brevity_penalty: Optional Instance of the BrevityPenalty class initialized with a brevity weight. :return: A concatenation of the translations with a score.
9,567
def load_map(path, value):
    # Jinja emit line reconstructed; the exact template body was lost in extraction
    tmplstr = textwrap.dedent('''\
        {{% from "{path}" import {value} with context %}}
        {{{{ {value} | tojson }}}}
        ''').format(path=path, value=value)

    return salt.template.compile_template_str(
        tmplstr,
        salt.loader.render(__opts__, __salt__),
        __opts__['renderer'],
        __opts__['renderer_blacklist'],
        __opts__['renderer_whitelist'])
Loads the map at the specified path, and returns the specified value from that map. CLI Example: .. code-block:: bash # Assuming the map is loaded in your formula SLS as follows: # # {% from "myformula/map.jinja" import myformula with context %} # # the following syntax can be used to load the map and check the # results: salt myminion jinja.load_map myformula/map.jinja myformula
9,568
def adaptive_model_average(X, penalization, method): n_trials = 100 print("Adaptive ModelAverage with:") print(" estimator: QuicGraphicalLasso (default)") print(" n_trials: {}".format(n_trials)) print(" penalization: {}".format(penalization)) print(" adaptive-method: {}".format(method)) lam = 0.5 if penalization == "random": cv_model = QuicGraphicalLassoCV( cv=2, n_refinements=6, n_jobs=1, init_method="cov", score_metric=metric ) cv_model.fit(X) lam = cv_model.lam_ print(" lam: {}".format(lam)) model = AdaptiveGraphicalLasso( estimator=ModelAverage( n_trials=n_trials, penalization=penalization, lam=lam, n_jobs=1 ), method=method, ) model.fit(X) lam_norm_ = np.linalg.norm(model.estimator_.lam_) print(" ||lam_||_2: {}".format(lam_norm_)) return model.estimator_.covariance_, model.estimator_.precision_, lam_norm_
Run ModelAverage in default mode (QuicGraphicalLassoCV) to obtain proportion matrix. NOTE: Only method = 'binary' really makes sense in this case.
9,569
def plot_calibrated_diode(dio_cross, chan_per_coarse=8, feedtype='l', **kwargs):
    obs = Waterfall(dio_cross, max_load=150)
    freqs = obs.populate_freqs()
    tsamp = obs.header['tsamp']
    data = obs.data
    obs = None

    I, Q, U, V = get_stokes(data, feedtype)
    data = None

    psis = phase_offsets(I, Q, U, V, tsamp, chan_per_coarse, feedtype, **kwargs)
    G = gain_offsets(I, Q, U, V, tsamp, chan_per_coarse, feedtype, **kwargs)
    I, Q, U, V = apply_Mueller(I, Q, U, V, G, psis, chan_per_coarse, feedtype)

    I_OFF, I_ON = foldcal(I, tsamp, **kwargs)
    Q_OFF, Q_ON = foldcal(Q, tsamp, **kwargs)
    U_OFF, U_ON = foldcal(U, tsamp, **kwargs)
    V_OFF, V_ON = foldcal(V, tsamp, **kwargs)
    I = None
    Q = None
    U = None
    V = None

    # plot colors/labels and axis strings reconstructed; originals lost in extraction
    plt.plot(freqs, I_ON - I_OFF, 'k-', label='I')
    plt.plot(freqs, Q_ON - Q_OFF, 'r-', label='Q')
    plt.plot(freqs, U_ON - U_OFF, 'g-', label='U')
    plt.plot(freqs, V_ON - V_OFF, 'b-', label='V')
    plt.legend()
    plt.xlabel('Frequency (MHz)')
    plt.title('Calibrated Noise Diode Spectrum')
    plt.ylabel('Power (Counts)')
Plots the corrected noise diode spectrum for a given noise diode measurement after application of the inverse Mueller matrix for the electronics chain.
9,570
def _grab_raw_image(self): m = self.ale.getScreenRGB() return m.reshape((self.height, self.width, 3))
:returns: the current 3-channel image
9,571
def start(self, blocking=True): self.setup_zmq() if blocking: self.serve() else: eventlet.spawn(self.serve) eventlet.sleep(0)
Start the producer. This will eventually fire the ``server_start`` and ``running`` events in sequence, which signify that the incoming TCP request socket is running and the workers have been forked, respectively. If ``blocking`` is False, control returns to the caller immediately after the server is spawned in the background.
9,572
def is_module_function(obj, prop): python_version = sys.version_info[0] if python_version == 3: unicode = str if prop and (isinstance(prop, str) or isinstance(prop, unicode)): if prop in dir(obj): if ( isinstance(getattr(obj, prop), FunctionType) or isinstance(getattr(obj, prop), BuiltinFunctionType) or inspect.ismethod(getattr(obj, prop)) ): return True else: ErrorHandler.prop_is_func_error(obj, prop) else: ErrorHandler.prop_in_obj_error(obj, prop) elif prop: ErrorHandler.prop_type_error(prop) return False
Checking and setting type to MODULE_FUNCTION Args: obj: ModuleType prop: FunctionType Return: Boolean Raise: prop_type_error: When the type of prop is not valid prop_in_obj_error: When prop is not in the obj(module/class) prop_is_func_error: When prop is not a callable stuff
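Hypothetical usage of is_module_function() against the standard math module:

import math

print(is_module_function(math, 'sqrt'))  # -> True (builtin function)
# is_module_function(math, 'pi') would route to ErrorHandler.prop_is_func_error,
# since 'pi' exists on the module but is not callable.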
9,573
def getargspec(func):
    if inspect.ismethod(func):
        func = func.__func__
    parts = 0, ()
    if type(func) is partial:
        keywords = func.keywords
        if keywords is None:
            keywords = {}
        parts = len(func.args), keywords.keys()
        func = func.func
    if not inspect.isfunction(func):
        raise TypeError('%r is not a Python function' % func)
    args, varargs, varkw = inspect.getargs(func.__code__)
    func_defaults = func.__defaults__
    if func_defaults is None:
        func_defaults = []
    else:
        func_defaults = list(func_defaults)
    if parts[0]:
        args = args[parts[0]:]
    if parts[1]:
        for arg in parts[1]:
            i = args.index(arg) - len(args)
            del args[i]
            try:
                del func_defaults[i]
            except IndexError:
                pass
    return inspect.ArgSpec(args, varargs, varkw, func_defaults)
Used because getargspec for python 2.7 does not accept functools.partial which is the type for pytest fixtures. getargspec excerpted from: sphinx.util.inspect ~~~~~~~~~~~~~~~~~~~ Helpers for inspecting Python modules. :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. Like inspect.getargspec but supports functools.partial as well.
9,574
def from_events(self, instance, ev_args, ctx): def make_from_args(ev_args, parent): el = etree.SubElement(parent, tag_to_str((ev_args[0], ev_args[1]))) for key, value in ev_args[2].items(): el.set(tag_to_str(key), value) return el root_el = make_from_args(ev_args, self.__get__(instance, type(instance))) stack = [root_el] while stack: ev_type, *ev_args = yield if ev_type == "start": stack.append(make_from_args(ev_args, stack[-1])) elif ev_type == "text": curr = stack[-1] if curr.text is not None: curr.text += ev_args[0] else: curr.text = ev_args[0] elif ev_type == "end": stack.pop() else: raise ValueError(ev_type)
Collect the events and convert them to a single XML subtree, which then gets appended to the list at `instance`. `ev_args` must be the arguments of the ``"start"`` event of the new child. This method is suspendable.
9,575
def as_python(self, infile, include_original_shex: bool=False): self._context.resolve_circular_references() body = for k in self._context.ordered_elements(): v = self._context.grammarelts[k] if isinstance(v, (JSGLexerRuleBlock, JSGObjectExpr)): body += v.as_python(k) if isinstance(v, JSGObjectExpr) and not self._context.has_typeid: self._context.directives.append(f) elif isinstance(v, JSGForwardRef): pass elif isinstance(v, (JSGValueType, JSGArrayExpr)): body += f"\n\n\n{k} = {v.signature_type()}" else: raise NotImplementedError("Unknown grammar elt for {}".format(k)) self._context.forward_refs.pop(k, None) body = + .join(self._context.directives) + body return _jsg_python_template.format(infile=infile, original_shex= + self.text if include_original_shex else "", version=__version__, gendate=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), body=body)
Return the python representation of the document
9,576
def return_xyz(self, labels=None): all_labels = self.return_label() if labels is None: labels = all_labels xyz = [] for one_label in labels: idx = all_labels.index(one_label) xyz.append(self.chan[idx].xyz) return asarray(xyz)
Returns the xyz location for some channels. Parameters ---------- labels : list of str, optional the names of the channels. Returns ------- numpy.ndarray an n x 3 array with the position of each channel.
9,577
def poa_horizontal_ratio(surface_tilt, surface_azimuth,
                         solar_zenith, solar_azimuth):
    cos_poa_zen = aoi_projection(surface_tilt, surface_azimuth,
                                 solar_zenith, solar_azimuth)
    cos_solar_zenith = tools.cosd(solar_zenith)

    ratio = cos_poa_zen / cos_solar_zenith

    try:
        ratio.name = 'poa_ratio'
    except AttributeError:
        pass

    return ratio
Calculates the ratio of the beam components of the plane of array irradiance and the horizontal irradiance. Input all angles in degrees. Parameters ---------- surface_tilt : numeric Panel tilt from horizontal. surface_azimuth : numeric Panel azimuth from north. solar_zenith : numeric Solar zenith angle. solar_azimuth : numeric Solar azimuth angle. Returns ------- ratio : numeric Ratio of the plane of array irradiance to the horizontal plane irradiance
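A standalone numeric sketch of the ratio, assuming the surface azimuth equals the solar azimuth so the angle of incidence is simply tilt minus zenith:

import numpy as np

surface_tilt, solar_zenith = 30.0, 30.0
cos_poa_zen = np.cos(np.radians(surface_tilt - solar_zenith))  # aoi = 0 here
ratio = cos_poa_zen / np.cos(np.radians(solar_zenith))
print(ratio)  # -> ~1.1547 (= 1/cos(30 deg))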
9,578
def mark(self, partition, offset): max_offset = max(offset + 1, self.high_water_mark.get(partition, 0)) self.logger.debug("Setting high-water mark to: %s", {partition: max_offset}) self.high_water_mark[partition] = max_offset
Set the high-water mark in the current context. In order to know the current partition, it is helpful to initialize the consumer to provide partition info via: .. code:: python consumer.provide_partition_info()
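A standalone sketch of the high-water-mark update rule used above; a stale (lower) offset never lowers the mark:

high_water_mark = {}
for partition, offset in [(0, 5), (0, 3), (0, 9)]:
    high_water_mark[partition] = max(offset + 1, high_water_mark.get(partition, 0))
print(high_water_mark)  # -> {0: 10}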
9,579
def char_sets():
    if not hasattr(char_sets, 'setlist'):
        clist = []
        try:
            data = requests.get(
                'http://www.iana.org/assignments/character-sets/character-sets-1.csv')
        except requests.exceptions.RequestException:
            return []
        for line in data.iter_lines():
            if line:
                line = line.decode("utf-8")
                if line.count(',') > 0:
                    vals = line.split(',')
                    if vals[0]:
                        clist.append(vals[0])
                    else:
                        clist.append(vals[1])
        char_sets.setlist = clist
    return char_sets.setlist
Return a list of the IANA Character Sets, or an empty list if the IANA website is unreachable. Store it as a function attribute so that we only build the list once.
9,580
def parse_translation(f, lineno):
    # key names and error-message strings reconstructed; originals lost in extraction
    line = f.readline()

    def get_line(f, line, need_keys, lineno, default=''):
        line = line.rstrip()
        if not line:
            return lineno, need_keys[0], default, line
        key, value = line.split(' ', 1)
        if key not in need_keys:
            print 'Expected %s at line %d: %s' % (need_keys, lineno, line)
            raise RuntimeError("parse error")
        v = value
        while 1:
            line = f.readline()
            line = line.rstrip()
            lineno += 1
            if not line or line[0] != '"':
                break
            v += '\n' + line
        return lineno, key, v, line

    comments = []
    while 1:
        if not line:
            return lineno, None, None, None
        if line.strip() == '':
            return lineno, comments, None, None
        elif line[0] == '#':
            comments.append(line[:-1])
        else:
            break
        line = f.readline()
        lineno += 1

    lineno, key, msgid, line = get_line(f, line, ['msgid'], lineno)
    lineno, key, value, line = get_line(f, line, ['msgstr', 'msgid_plural'], lineno)
    if key == 'msgid_plural':
        msgid = (msgid, value)
        lineno, key, v1, line = get_line(f, line, ['msgstr[0]'], lineno)
        lineno, key, v2, line = get_line(f, line, ['msgstr[1]'], lineno)
        msgstr = (v1, v2)
    else:
        msgstr = value

    if line != '':
        print 'Parse error in %s: %s' % (f.name, line)
        raise RuntimeError("parse error")

    return lineno, comments, msgid, msgstr
Read a single translation entry from the file F and return a tuple with the comments, msgid and msgstr. The comments is returned as a list of lines which do not end in new-lines. The msgid and msgstr are strings, possibly with embedded newlines
9,581
def _render_reward(self, r: np.float32) -> None:
    print("reward = {:.4f}".format(float(r)))
    print()
Prints reward `r`.
9,582
def get_flexports_output_flexport_list_port_id(self, **kwargs):
    # ET is xml.etree.ElementTree; kwargs keys restored from the pattern
    # these auto-generated methods follow ('port_id' and 'callback')
    config = ET.Element("config")
    get_flexports = ET.Element("get_flexports")
    config = get_flexports
    output = ET.SubElement(get_flexports, "output")
    flexport_list = ET.SubElement(output, "flexport-list")
    port_id = ET.SubElement(flexport_list, "port-id")
    port_id.text = kwargs.pop('port_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
9,583
def associate_keys(user_dict, client):
    # dict keys below ('keys', 'machines', 'public_ips') were lost in
    # extraction and are restored as assumptions
    added_keys = user_dict['keys']
    print ">>>Updating Keys-Machines association"
    for key in added_keys:
        machines = added_keys[key]['machines']
        if machines:
            try:
                for machine in machines:
                    cloud_id = machine[0]
                    machine_id = machine[1]
                    ssh_user = machine[3]
                    ssh_port = machine[-1]
                    # the original rebound `key` here, which broke the lookup
                    # on the second machine of the same key; renamed to key_obj
                    key_obj = client.keys[key]
                    cloud = cloud_from_id(client, cloud_id)
                    cloud.update_machines()
                    mach = machine_from_id(cloud, machine_id)
                    public_ips = mach.info.get('public_ips', None)
                    if public_ips:
                        host = public_ips[0]
                    else:
                        host = ""
                    key_obj.associate_to_machine(cloud_id=cloud_id,
                                                 machine_id=machine_id,
                                                 host=host,
                                                 ssh_port=ssh_port,
                                                 ssh_user=ssh_user)
                    print "associated machine %s" % machine_id
            except Exception as e:
                pass
    client.update_keys()
    print  # the original trailing message literal was lost in extraction
This whole function is black magic; it had to be, however, because of the way we keep the key-machine association
9,584
def gc(cn, ns=None, lo=None, iq=None, ico=None, pl=None):
    return CONN.GetClass(cn, ns,
                         LocalOnly=lo,
                         IncludeQualifiers=iq,
                         IncludeClassOrigin=ico,
                         PropertyList=pl)
This function is a wrapper for :meth:`~pywbem.WBEMConnection.GetClass`. Retrieve a class. Parameters: cn (:term:`string` or :class:`~pywbem.CIMClassName`): Name of the class to be retrieved (case independent). If specified as a `CIMClassName` object, its `host` attribute will be ignored. ns (:term:`string`): Name of the CIM namespace to be used (case independent). If `None`, defaults to the namespace of the `cn` parameter if specified as a `CIMClassName`, or to the default namespace of the connection. lo (:class:`py:bool`): LocalOnly flag: Exclude inherited properties. `None` will cause the server default of `True` to be used. iq (:class:`py:bool`): IncludeQualifiers flag: Include qualifiers. `None` will cause the server default of `True` to be used. ico (:class:`py:bool`): IncludeClassOrigin flag: Include class origin information for properties and methods in the retrieved class. `None` will cause the server default of `False` to be used. pl (:term:`string` or :term:`py:iterable` of :term:`string`): PropertyList: Names of properties to be included (if not otherwise excluded). An empty iterable indicates to include no properties. If `None`, all properties will be included. Returns: :class:`~pywbem.CIMClass`: The retrieved class.
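A brief usage sketch for the wrapper above, using a standard CIM class and namespace:

# retrieve the class with qualifiers but without class-origin info
cls = gc('CIM_ManagedElement', ns='root/cimv2', lo=False, iq=True, ico=False)
print(cls.classname)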
9,585
def get_res(ds, t_srs=None, square=False):
    # requires numpy as np and osgeo.osr as osr
    gt = ds.GetGeoTransform()
    ds_srs = get_ds_srs(ds)
    # resolution in the dataset's own projection
    res = [gt[1], np.abs(gt[5])]
    if square:
        res = [np.mean(res), np.mean(res)]
    if t_srs is not None and not ds_srs.IsSame(t_srs):
        if True:
            # estimate res from the diagonal of the reprojected extent
            extent = ds_extent(ds, t_srs)
            diag = np.sqrt((extent[2] - extent[0])**2 +
                           (extent[3] - extent[1])**2)
            res = diag / np.sqrt(ds.RasterXSize**2 + ds.RasterYSize**2)
            res = [res, res]
        else:
            # alternative (disabled above): transform a one-pixel offset
            # from the dataset center
            ct = osr.CoordinateTransformation(ds_srs, t_srs)
            pt = get_center(ds)
            pt_ct = ct.TransformPoint(*pt)
            pt_ct_plus = ct.TransformPoint(pt[0] + gt[1], pt[1] + gt[5])
            res = [pt_ct_plus[0] - pt_ct[0], np.abs(pt_ct_plus[1] - pt_ct[1])]
    return res
Get GDAL Dataset raster resolution
9,586
def fit(self, X, y):
    self._word_vocab.add_documents(X)
    self._label_vocab.add_documents(y)
    if self._use_char:
        for doc in X:
            self._char_vocab.add_documents(doc)
    self._word_vocab.build()
    self._char_vocab.build()
    self._label_vocab.build()
    return self
Learn vocabulary from training set. Args: X : iterable. An iterable which yields either str, unicode or file objects. Returns: self : IndexTransformer.
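A minimal sketch of fitting the transformer on token/label sequences; the constructor arguments are an assumption:

X = [['EU', 'rejects', 'German', 'call'], ['Peter', 'Blackburn']]
y = [['B-ORG', 'O', 'B-MISC', 'O'], ['B-PER', 'I-PER']]
it = IndexTransformer(use_char=True)  # hypothetical constructor signature
it.fit(X, y)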
9,587
def commit_sell(self, account_id, sell_id, **params):
    # path segments restored as assumptions from the public Coinbase
    # client conventions
    response = self._post(
        'v2', 'accounts', account_id, 'sells', sell_id, 'commit',
        data=params)
    return self._make_api_object(response, Sell)
https://developers.coinbase.com/api/v2#commit-a-sell
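A usage sketch; the account and sell ids are placeholders:

# commit a previously created sell; extra params pass through as the request body
sell = client.commit_sell('account-id', 'sell-id')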
9,588
def to_dict(self):
    out = {}
    for key in self.__dict__.keys():
        # the original excluded four attribute names; 'left' and 'right' are
        # recoverable from the docstring, the other two were lost in extraction
        if key not in ['left', 'right']:
            out[key] = self.__dict__[key]
    return out
Return the node as a dictionary. Returns ------- dict: All the attributes of this node as a dictionary (minus the left and right).
9,589
import os
import shutil
import sys

def copy_directory(src, dest, force=False):
    if os.path.exists(dest) and force is True:
        shutil.rmtree(dest)
    try:
        shutil.copytree(src, dest)
    except OSError as e:
        # the original error message printed here was lost in extraction
        sys.exit(1)
Copy an entire directory recursively
9,590
def info(self, msg, indent=0, **kwargs):
    return self.logger.info(self._indent(msg, indent), **kwargs)
invoke ``self.logger.info`` with the message indented
9,591
def _collapse_outgroup(tree, taxdicts):
    # requires `import copy` and ete3 imported as `ete`
    # check that all tests share the same outgroup
    outg = taxdicts[0]["p4"]
    if not all([i["p4"] == outg for i in taxdicts]):
        raise Exception("no good")
    # prune the tree to the ingroup taxa plus a single outgroup tip
    tre = ete.Tree(tree.write(format=1))
    alltax = [i for i in tre.get_leaf_names() if i not in outg]
    alltax += [outg[0]]
    tre.prune(alltax)
    tre.search_nodes(name=outg[0])[0].name = "outgroup"
    tre.ladderize()
    # relabel the outgroup in every test dict
    taxd = copy.deepcopy(taxdicts)
    newtaxdicts = []
    for test in taxd:
        test["p4"] = ["outgroup"]
        newtaxdicts.append(test)
    return tre, newtaxdicts
collapse outgroup in ete Tree for easier viewing
9,592
def request_fetch(self, user, repo, request, pull=False, force=False):
    raise NotImplementedError
Fetches the given request as a branch, and switches to it if pull is true :param repo: name of the repository to fetch the request from Meant to be implemented by subclasses
9,593
def enable_llama_ha(self, new_llama_host_id, zk_service_name=None,
                    new_llama_role_name=None):
    args = dict(
        newLlamaHostId=new_llama_host_id,
        zkServiceName=zk_service_name,
        newLlamaRoleName=new_llama_role_name,
    )
    # command name restored as an assumption from the cm_api client
    return self._cmd('impalaEnableLlamaHa', data=args, api_version=8)
Enable high availability for an Impala Llama ApplicationMaster. This command only applies to CDH 5.1+ Impala services. @param new_llama_host_id: id of the host where the second Llama role will be added. @param zk_service_name: Name of the ZooKeeper service to use for auto-failover. If Impala's ZooKeeper dependency is already set, then that ZooKeeper service will be used for auto-failover, and this parameter may be omitted. @param new_llama_role_name: Name of the new Llama role. If omitted, a name will be generated automatically. @return: Reference to the submitted command. @since: API v8
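A usage sketch against a hypothetical Impala service handle; the host id and service name are placeholders:

cmd = impala_service.enable_llama_ha('host-22',
                                     zk_service_name='zookeeper1')
# the returned command reference can then be polled or waited on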
9,594
def start(vm_name, call=None):
    # reconstruction of the Salt Cloud GCE action: the fused string literals
    # are restored from the residue, while the SaltCloudSystemExit, get_conn,
    # ex_get_node and ex_start_node calls follow the standard Salt Cloud
    # driver pattern and should be treated as assumptions
    if call != 'action':
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )
    conn = get_conn()
    node = conn.ex_get_node(vm_name)
    __utils__['cloud.fire_event'](
        'event',
        'start instance',
        'salt/cloud/{0}/starting'.format(vm_name),
        args={'name': vm_name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    result = conn.ex_start_node(node)
    __utils__['cloud.fire_event'](
        'event',
        'start instance',
        'salt/cloud/{0}/started'.format(vm_name),
        args={'name': vm_name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return result
Call GCE 'start' on the instance. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt-cloud -a start myinstance
9,595
def get_command_arg_list(self, command_name: str,
                         to_parse: Union[Statement, str],
                         preserve_quotes: bool) -> Tuple[Statement, List[str]]:
    # if a string was passed in, parse it into a Statement first
    if not isinstance(to_parse, Statement):
        to_parse = self.parse(command_name + ' ' + to_parse, expand=False)

    if preserve_quotes:
        return to_parse, to_parse.arg_list
    else:
        return to_parse, to_parse.argv[1:]
Called by the argument_list and argparse wrappers to retrieve just the arguments being passed to their do_* methods as a list. :param command_name: name of the command being run :param to_parse: what is being passed to the do_* method. It can be one of two types: 1. An already parsed Statement 2. An argument string in cases where a do_* method is explicitly called e.g.: Calling do_help('alias create') would cause to_parse to be 'alias create' In this case, the string will be converted to a Statement and returned along with the argument list. :param preserve_quotes: if True, then quotes will not be stripped from the arguments :return: A tuple containing: The Statement used to retrieve the arguments The argument list
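A sketch of how a do_* method might use this; the command line is illustrative:

# called with a plain string, e.g. from do_alias('create ls "ls -la"')
statement, args = self.get_command_arg_list('alias', 'create ls "ls -la"', False)
# args == ['create', 'ls', 'ls -la'], quotes stripped because preserve_quotes is False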
9,596
def _HasId(self, schedule, entity_id):
    try:
        self._GetById(schedule, entity_id)
        has = True
    except KeyError:
        has = False
    return has
Check if the schedule has an entity with the given id. Args: schedule: The transitfeed.Schedule instance to look in. entity_id: The id of the entity. Returns: True if the schedule has an entity with the id or False if not.
9,597
def find_segment(self, ea):
    for seg in self.seglist:
        if seg.startea <= ea < seg.endea:
            return seg
    # implicitly returns None when no segment contains ea
do a linear search for the given address in the segment list
9,598
def edit_by_id(self, id_equip_acesso, id_tipo_acesso, fqdn, user,
               password, enable_pass):
    # dict keys, error message, URL, and HTTP method below were lost in
    # extraction and are restored as assumptions following the
    # GloboNetworkAPI client conventions
    if not is_valid_int_param(id_tipo_acesso):
        raise InvalidParameterError(
            u'Access type id is invalid or was not informed.')
    equipamento_acesso_map = dict()
    equipamento_acesso_map['fqdn'] = fqdn
    equipamento_acesso_map['user'] = user
    equipamento_acesso_map['pass'] = password
    equipamento_acesso_map['enable_pass'] = enable_pass
    equipamento_acesso_map['id_tipo_acesso'] = id_tipo_acesso
    equipamento_acesso_map['id_equip_acesso'] = id_equip_acesso
    url = 'equipamentoacesso/edit/'
    code, xml = self.submit(
        {'equipamento_acesso': equipamento_acesso_map}, 'POST', url)
    return self.response(code, xml)
Edit access type, fqdn, user, password and enable_pass of the relationship of equipment and access type. :param id_tipo_acesso: Access type identifier. :param id_equip_acesso: Equipment identifier. :param fqdn: Equipment FQDN. :param user: User. :param password: Password. :param enable_pass: Enable access. :return: None :raise InvalidParameterError: The parameters fqdn, user, password or access type id are invalid or none. :raise EquipamentoAcessoNaoExisteError: Equipment access type relationship doesn't exist. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
9,599
def find_visible_elements(self, selector, by=By.CSS_SELECTOR, limit=0):
    self.wait_for_ready_state_complete()
    if page_utils.is_xpath_selector(selector):
        by = By.XPATH
    if page_utils.is_link_text_selector(selector):
        selector = page_utils.get_link_text_from_selector(selector)
        by = By.LINK_TEXT
    v_elems = page_actions.find_visible_elements(self.driver, selector, by)
    if limit and limit > 0 and len(v_elems) > limit:
        v_elems = v_elems[:limit]
    return v_elems
Returns a list of matching WebElements that are visible. If "limit" is set and > 0, will only return that many elements.
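A short usage sketch; the selector and limit are illustrative:

links = self.find_visible_elements('a.result-link', limit=5)
for link in links:
    print(link.text)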