Dataset columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k)
13,400
def get_line_numbers(self, buffer): from_, to = self.operator_range(buffer.document) from_ += buffer.cursor_position to += buffer.cursor_position from_, _ = buffer.document.translate_index_to_position(from_) to, _ = buffer.document.translate_index_to_position(to) return from_, to
Return a (start_line, end_line) pair.
13,401
def solve_gfl(data, edges=None, weights=None, minlam=0.2, maxlam=1000.0, numlam=30, alpha=0.2, inflate=2., converge=1e-6, maxsteps=1000000, lam=None, verbose=0, missing_val=None, full_path=False, loss=): if edges.shape[0] < 1: return data init_edges = edges if verbose: print() if loss == : flat_data = data[0].flatten() nonmissing_flat_data = flat_data, data[1].flatten() else: flat_data = data.flatten() nonmissing_flat_data = flat_data if edges is None: if loss == : if verbose: print(.format(data[0].shape)) edges = hypercube_edges(data[0].shape) else: if verbose: print(.format(data.shape)) edges = hypercube_edges(data.shape) if missing_val is not None: if verbose: print(.format(missing_val)) edges = [(e1,e2) for (e1,e2) in edges if flat_data[e1] != missing_val and flat_data[e2] != missing_val] if loss == : nonmissing_flat_data = flat_data[flat_data != missing_val], nonmissing_flat_data[1][flat_data != missing_val] else: nonmissing_flat_data = flat_data[flat_data != missing_val] g = Graph() g.add_edges_from(edges) chains = decompose_graph(g, heuristic=) ntrails, trails, breakpoints, edges = chains_to_trails(chains) if verbose: print() if loss == : solver = TrailSolver(alpha, inflate, maxsteps, converge) elif loss == : solver = LogisticTrailSolver(alpha, inflate, maxsteps, converge) elif loss == : solver = BinomialTrailSolver(alpha, inflate, maxsteps, converge) else: raise NotImplementedError() solver.set_data(nonmissing_flat_data, edges, ntrails, trails, breakpoints, weights=weights) if verbose: print() if lam: beta = solver.solve(lam) else: beta = solver.solution_path(minlam, maxlam, numlam, verbose=max(0, verbose-1)) if not full_path: beta = beta[] mask = np.ones_like(beta) mask[init_edges[:,0]] = 0 mask[init_edges[:,1]] = 0 beta[mask>0] = data[mask>0] return beta
A very easy-to-use version of GFL solver that just requires the data and the edges.
13,402
def __find_block_neighbors(self, block, level_blocks, unhandled_block_indexes): neighbors = [] handled_block_indexes = [] for unhandled_index in unhandled_block_indexes: if block.is_neighbor(level_blocks[unhandled_index]): handled_block_indexes.append(unhandled_index) neighbors.append(level_blocks[unhandled_index]) if len(neighbors) == 8: break for handled_index in handled_block_indexes: unhandled_block_indexes.remove(handled_index) return neighbors
! @brief Search block neighbors that are parts of new clusters (density is greater than threshold and that are not cluster members yet), other neighbors are ignored. @param[in] block (bang_block): BANG-block for which neighbors should be found (which can be part of cluster). @param[in] level_blocks (list): BANG-blocks on specific level. @param[in] unhandled_block_indexes (set): Blocks that have not been processed yet. @return (list) Block neighbors that can become part of cluster.
13,403
def call_function(self, command, response_length=0, params=[], timeout_sec=1): data = bytearray(2+len(params)) data[0] = PN532_HOSTTOPN532 data[1] = command & 0xFF data[2:] = params self._write_frame(data) if not self._wait_ready(timeout_sec): return None response = self._read_data(len(PN532_ACK)) if response != PN532_ACK: raise RuntimeError() if not self._wait_ready(timeout_sec): return None response = self._read_frame(response_length+2) if not (response[0] == PN532_PN532TOHOST and response[1] == (command+1)): raise RuntimeError() return response[2:]
Send the specified command to the PN532 and expect up to response_length bytes back in a response. Note that fewer than the expected bytes might be returned! Params can optionally specify an array of bytes to send as parameters to the function call. Will wait up to timeout_sec seconds for a response and return a bytearray of response bytes, or None if no response is available within the timeout.
13,404
def read_altitude(self, sealevel_pa=101325.0): pressure = float(self.read_pressure()) altitude = 44330.0 * (1.0 - pow(pressure / sealevel_pa, (1.0/5.255))) self.logger.debug(, altitude) return altitude
Calculates the altitude in meters.
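A minimal worked example of the barometric formula the function applies (the measured pressure value below is a hypothetical sensor reading; 101325.0 Pa is the default sea-level pressure from the signature):
pressure = 90000.0  # Pa, hypothetical reading
altitude = 44330.0 * (1.0 - pow(pressure / 101325.0, 1.0 / 5.255))
# altitude comes out to roughly 989 m for this reading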
13,405
def update_roles_gce(use_cache=True, cache_expiration=86400, cache_path="~/.gcetools/instances", group_name=None, region=None, zone=None): data = _get_data(use_cache, cache_expiration, group_name=group_name, region=region, zone=zone) roles = _get_roles(data) env.roledefs.update(roles) _data_loaded = True return INSTANCES_CACHE
Dynamically update Fabric's roles by assigning the tags associated with each machine in Google Compute Engine. use_cache - will store a local cache in ~/.gcetools/ cache_expiration - cache expiration in seconds (default: 1 day) cache_path - the path to store instances data (default: ~/.gcetools/instances) group_name - optional managed instance group to use instead of the global instance pool region - gce region name (such as `us-central1`) for a regional managed instance group zone - gce zone name (such as `us-central1-a`) for a zonal managed instance group How to use: - Call 'update_roles_gce' at the end of your fabfile.py (it will run each time you run fabric). - On each function use the regular @roles decorator and set the role to the name of one of the tags associated with the instances you wish to work with (see the sketch below).
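A minimal fabfile sketch of the usage described above; the tag name 'web' and the task body are hypothetical:
# fabfile.py
from fabric.api import roles, run

@roles('web')   # 'web' is one of the GCE tags on the target instances
def restart_nginx():
    run('sudo service nginx restart')

update_roles_gce()  # called at the end of the fabfile, as described above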
13,406
def get_disconnect_message(self, code: int): self.order += 1 return { : , : None, : self.path, : self.order, : code, }
http://channels.readthedocs.io/en/stable/asgi/www.html#disconnection
13,407
def rgb2ansi(r, g, b): grayscale = False poss = True step = 2.5 while poss: if min(r, g, b) < step: grayscale = max(r, g, b) < step poss = False step += 42.5 if grayscale: return 232 + int(float(sum((r, g, b)) / 33.0)) m = ((r, 36), (g, 6), (b, 1)) return 16 + sum(int(6 * float(val) / 256) * mod for val, mod in m)
Convert an RGB color to 256 ansi graphics.
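Two worked examples of the mapping implemented above (the values follow directly from the function's arithmetic):
>>> rgb2ansi(255, 0, 0)   # not grayscale: 16 + int(6*255/256)*36
196
>>> rgb2ansi(8, 8, 8)     # all channels under the 45 grayscale step: 232 + int(24/33.0)
232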
13,408
def _login_request(self, username=None, secret=None): url = + self._host + params = {} if username: params[] = username if secret: params[] = secret plain = self._request(url, params) dom = xml.dom.minidom.parseString(plain) sid = get_text(dom.getElementsByTagName()[0].childNodes) challenge = get_text( dom.getElementsByTagName()[0].childNodes) return (sid, challenge)
Send a login request with parameters.
13,409
def _run(self): if KSER_METRICS_ENABLED == "yes": KSER_TASK_COUNT.inc() logger.debug( "{}.Run: {}[{}]".format( self.__class__.__name__, self.__class__.path, self.uuid ), extra=dict( kmsg=Message( self.uuid, entrypoint=self.__class__.path, params=self.params, metadata=self.metadata ).dump() ) ) return self.run()
Execution body :return: Execution result :rtype: kser.result.Result
13,410
def get_last_api_metadata(self): last_metadata = None for key, api in iteritems(self.apis): api_client = api.api_client if api_client is not None: metadata = api_client.get_last_metadata() if metadata is not None and metadata.get("timestamp", None) is not None: if last_metadata is None: last_metadata = metadata elif metadata["timestamp"] >= last_metadata["timestamp"]: last_metadata = metadata if last_metadata is not None: last_metadata = ApiMetadata(last_metadata.get("url"), last_metadata.get("method"), last_metadata.get("response", None), last_metadata.get("return_data", None), last_metadata.get("exception", None)) return last_metadata
Get meta data for the last Mbed Cloud API call. :returns: meta data of the last Mbed Cloud API call :rtype: ApiMetadata
13,411
def index_humansorted(seq, key=None, reverse=False, alg=ns.DEFAULT): return index_natsorted(seq, key, reverse, alg | ns.LOCALE)
This is a wrapper around ``index_natsorted(seq, alg=ns.LOCALE)``. Parameters ---------- seq: iterable The input to sort. key: callable, optional A key used to determine how to sort each element of the sequence. It is **not** applied recursively. It should accept a single argument and return a single value. reverse : {{True, False}}, optional Return the list in reversed sorted order. The default is `False`. alg : ns enum, optional This option is used to control which algorithm `natsort` uses when sorting. For details into these options, please see the :class:`ns` class documentation. The default is `ns.LOCALE`. Returns ------- out : tuple The ordered indexes of the input. See Also -------- humansorted order_by_index Notes ----- Please read :ref:`locale_issues` before using `humansorted`. Examples -------- Use `index_humansorted` just like the builtin `sorted`:: >>> a = ['Apple', 'Banana', 'apple', 'banana'] >>> index_humansorted(a) [2, 0, 3, 1]
13,412
def save_to_file(self, path): with open(path, 'w') as out: out.write(json.dumps(self.get_dict()))
Dump all cookies to file. Cookies are dumped as JSON-serialized dict of keys and values.
13,413
def tAx(mt, x, t): return mt.Mx[x + t] / mt.Dx[x]
n/Ax : Returns the EPV (net single premium) of a deferred whole life insurance.
13,414
def get_notificant(self, id, **kwargs): kwargs['async_req'] = True if kwargs.get('async_req'): return self.get_notificant_with_http_info(id, **kwargs) else: (data) = self.get_notificant_with_http_info(id, **kwargs) return data
Get a specific notification target # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_notificant(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :return: ResponseContainerNotificant If the method is called asynchronously, returns the request thread.
13,415
def run(self): try: quit_request_detected = False while True: function, arguments = self.task_queue.get() if function is None: self.config.logger.info() break if quit_request_detected: continue try: try: args, kwargs = arguments except ValueError: args = arguments kwargs = {} function(*args, **kwargs) except Exception: self.config.logger.error("Error in processing a job", exc_info=True) except KeyboardInterrupt: self.config.logger.info() quit_request_detected = True except Exception: self.config.logger.critical("Failure in task_queue", exc_info=True)
The main routine for a thread's work. The thread pulls tasks from the task queue and executes them until it encounters a death token. The death token is a tuple of two Nones.
13,416
def zSetSurfaceData(self, surfNum, radius=None, thick=None, material=None, semidia=None, conic=None, comment=None): if self.pMode == 0: surf = self.pLDE.GetSurfaceAt(surfNum) if radius is not None: surf.pRadius = radius if thick is not None: surf.pThickness = thick if material is not None: surf.pMaterial = material if semidia is not None: surf.pSemiDiameter = semidia if conic is not None: surf.pConic = conic if comment is not None: surf.pComment = comment else: raise NotImplementedError()
Sets surface data
13,417
def get_conditional_instance(self, parameter_names): if not isinstance(parameter_names, list): parameter_names = [parameter_names] for iname, name in enumerate(parameter_names): name = str(name).lower() parameter_names[iname] = name assert name in self.jco.col_names,\ "contribution parameter " + name + " not found jco" keep_names = [] for name in self.jco.col_names: if name not in parameter_names: keep_names.append(name) if len(keep_names) == 0: raise Exception("Schur.contribution_from_Parameters " + "atleast one parameter must remain uncertain") if self.predictions is None: raise Exception("Schur.contribution_from_Parameters " + "no predictions have been set") cond_preds = self.predictions.get(row_names=keep_names) la_cond = Schur(jco=self.jco.get(self.jco.row_names, keep_names), parcov=self.parcov.condition_on(parameter_names), obscov=self.obscov, predictions=cond_preds,verbose=False) return la_cond
get a new Schur instance that includes conditional update from some parameters becoming known perfectly Parameters ---------- parameter_names : list parameters that are to be treated as notionally perfectly known Returns ------- la_cond : Schur a new Schur instance conditional on perfect knowledge of some parameters Note ---- this method is used by the get_parameter_contribution() method - don't call this method directly
13,418
def global_include(self, pattern): if self.allfiles is None: self.findall() match = translate_pattern(os.path.join('**', pattern)) found = [f for f in self.allfiles if match.match(f)] self.extend(found) return bool(found)
Include all files anywhere in the current directory that match the pattern. This is very inefficient on large file trees.
13,419
def disconnect(self): if self._connected: self._connected = False self._conn.disconnect()
Gracefully close connection to stomp server.
13,420
def extend_left_to(self, window, max_size): rofs = self.ofs - window.ofs_end() nsize = rofs + self.size rofs -= nsize - min(nsize, max_size) self.ofs = self.ofs - rofs self.size += rofs
Adjust the offset to start where the given window on our left ends if possible, but don't make yourself larger than max_size. The resize will assure that the new window still contains the old window area
13,421
def list_numbers(self, **kwargs): kwargs['async'] = True if kwargs.get('async'): return self.list_numbers_with_http_info(**kwargs) else: (data) = self.list_numbers_with_http_info(**kwargs) return data
Get your numbers # noqa: E501 List all your purchased numbers # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_numbers(async=True) >>> result = thread.get() :param async bool :return: ResponseNumberList If the method is called asynchronously, returns the request thread.
13,422
def _serialize_list(cls, list_): list_serialized = [] for item in list_: item_serialized = cls.serialize(item) list_serialized.append(item_serialized) return list_serialized
:type list_: list :rtype: list
13,423
def process_pybel_graph(graph): bp = PybelProcessor(graph) bp.get_statements() if bp.annot_manager.failures: logger.warning(, sum(len(v) for v in bp.annot_manager.failures.values())) return bp
Return a PybelProcessor by processing a PyBEL graph. Parameters ---------- graph : pybel.struct.BELGraph A PyBEL graph to process Returns ------- bp : PybelProcessor A PybelProcessor object which contains INDRA Statements in bp.statements.
13,424
def run_gatk_germline_pipeline(job, samples, config): work_dir = job.fileStore.getLocalTempDir() st = os.statvfs(work_dir) config.available_disk = st.f_bavail * st.f_frsize num_samples = len(samples) if config.joint_genotype and not 30 < num_samples < 200: job.fileStore.logToMaster( % num_samples) shared_files = Job.wrapJobFn(download_shared_files, config).encapsulate() job.addChild(shared_files) if config.preprocess_only: for sample in samples: shared_files.addChildJobFn(prepare_bam, sample.uuid, sample.url, shared_files.rv(), paired_url=sample.paired_url, rg_line=sample.rg_line) else: run_pipeline = Job.wrapJobFn(gatk_germline_pipeline, samples, shared_files.rv()).encapsulate() shared_files.addChild(run_pipeline) if config.run_oncotator: annotate = Job.wrapJobFn(annotate_vcfs, run_pipeline.rv(), shared_files.rv()) run_pipeline.addChild(annotate)
Downloads shared files and calls the GATK best practices germline pipeline for a cohort of samples :param JobFunctionWrappingJob job: passed automatically by Toil :param list[GermlineSample] samples: List of GermlineSample namedtuples :param Namespace config: Configuration options for pipeline Requires the following config attributes: config.preprocess_only If True, then stops pipeline after preprocessing steps config.joint_genotype If True, then joint genotypes cohort config.run_oncotator If True, then adds Oncotator to pipeline Additional parameters are needed for downstream steps. Refer to pipeline README for more information.
13,425
def _calc_checksum(self, secret): return str_to_uascii( hashlib.sha256(mysql_aes_encrypt(self.salt, secret)).hexdigest() )
Calculate the checksum string. :param secret: The secret key. :returns: The checksum.
13,426
def setup(self): if self._started: return self._common_setup() if platform.type == "posix": self._reactor.callFromThread(self._startReapingProcesses) if self._startLoggingWithObserver: observer = ThreadLogObserver(PythonLoggingObserver().emit) def start(): from twisted.python import log original = log.showwarning log.showwarning = warnings.showwarning self._startLoggingWithObserver(observer, False) log.showwarning = original self._reactor.callFromThread(start) self._reactor.addSystemEventTrigger( "after", "shutdown", observer.stop) t = threading.Thread( target=lambda: self._reactor.run(installSignalHandlers=False), name="CrochetReactor") t.start() self._atexit_register(self._reactor.callFromThread, self._reactor.stop) self._atexit_register(_store.log_errors) if self._watchdog_thread is not None: self._watchdog_thread.start()
Initialize the crochet library. This starts the reactor in a thread, and connect's Twisted's logs to Python's standard library logging module. This must be called at least once before the library can be used, and can be called multiple times.
13,427
def deserialize_upload(value, url): result = {: None, : None} try: result = signing.loads(value, salt=url) except signing.BadSignature: pass else: try: result[] = get_storage_class(result[]) except (ImproperlyConfigured, ImportError): result = {: None, : None} return result
Restore the file name and storage from the serialized value and the upload url.
13,428
def ordersku_update(self, oid, sku_id=None, sku_props=None): request = TOPRequest('taobao.trade.ordersku.update') request['oid'] = oid if sku_id!=None: request['sku_id'] = sku_id if sku_props!=None: request['sku_props'] = sku_props self.create(self.execute(request)['order']) return self
taobao.trade.ordersku.update - Update the sales (SKU) attributes of a trade order. Requires seller-level or higher permissions to call this API. It can be called repeatedly to update the trade memo, and it can also add a memo.
13,429
def decode_offset_fetch_response(cls, response): return [ kafka.structs.OffsetFetchResponsePayload( topic, partition, offset, metadata, error ) for topic, partitions in response.topics for partition, offset, metadata, error in partitions ]
Decode OffsetFetchResponse to OffsetFetchResponsePayloads Arguments: response: OffsetFetchResponse
13,430
def window(data, param): name = param.name edges = param.edges where = param.where sortColumns = param.sort calc_value = jx_expression_to_function( param.value ) aggregate = param.aggregate _range = ( param.range ) data = filter(data, where) if not aggregate and not edges: if sortColumns: data = sort(data, sortColumns, already_normalized=True) for rownum, r in enumerate(data): try: r[name] = calc_value(r, rownum, data) except Exception as e: raise e return try: edge_values = [e.value.var for e in edges] except Exception as e: raise Log.error("can only support simple variable edges", cause=e) if not aggregate or aggregate == "none": for _, values in groupby(data, edge_values): if not values: continue if sortColumns: sequence = sort(values, sortColumns, already_normalized=True) else: sequence = values for rownum, r in enumerate(sequence): r[name] = calc_value(r, rownum, sequence) return for keys, values in groupby(data, edge_values): if not values: continue sequence = sort(values, sortColumns) for rownum, r in enumerate(sequence): r["__temp__"] = calc_value(r, rownum, sequence) head = coalesce(_range.max, _range.stop) tail = coalesce(_range.min, _range.start) total = aggregate() for i in range(tail, head): total.add(sequence[i].__temp__) for i, r in enumerate(sequence): r[name] = total.end() total.add(sequence[i + head].__temp__) total.sub(sequence[i + tail].__temp__) for r in data: r["__temp__"] = None
MAYBE WE CAN DO THIS WITH NUMPY (no, the edges of windows are not graceful with numpy) data - list of records
13,431
def post_structure(entry, site): author = entry.authors.all()[0] return {: entry.title, : six.text_type(entry.html_content), : % (PROTOCOL, site.domain, entry.get_absolute_url()), : % (PROTOCOL, site.domain, entry.get_absolute_url()), : [cat.title for cat in entry.categories.all()], : DateTime(entry.creation_date.isoformat()), : entry.pk, : author.get_username(), : entry.excerpt, : int(entry.comment_enabled), : (int(entry.pingback_enabled) or int(entry.trackback_enabled)), : entry.tags, : author.get_username(), : author.pk, : author.__str__(), : entry.password, : entry.slug, : entry.featured}
A post structure with extensions.
13,432
def init_autoindex(self, auto_interval): if not auto_interval: return from pywb.manager.autoindex import AutoIndexer colls_dir = self.warcserver.root_dir if self.warcserver.root_dir else None indexer = AutoIndexer(colls_dir=colls_dir, interval=int(auto_interval)) if not os.path.isdir(indexer.root_path): msg = logging.error(msg.format(indexer.root_path)) import sys sys.exit(2) msg = logging.info(msg.format(indexer.root_path, auto_interval)) indexer.start()
Initialize and start the auto-indexing of the collections. If auto_interval is None this is a no op. :param str|int auto_interval: The auto-indexing interval from the configuration file or CLI argument
13,433
def all(self, list_id, subscriber_hash, **queryparams): subscriber_hash = check_subscriber_hash(subscriber_hash) self.list_id = list_id self.subscriber_hash = subscriber_hash return self._mc_client._get(url=self._build_path(list_id, , subscriber_hash, ), **queryparams)
Get the last 50 events of a member’s activity on a specific list, including opens, clicks, and unsubscribes. :param list_id: The unique id for the list. :type list_id: :py:class:`str` :param subscriber_hash: The MD5 hash of the lowercase version of the list member’s email address. :type subscriber_hash: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []
13,434
def _register_factory(self, factory_name, factory, override): if not factory_name or not is_string(factory_name): raise ValueError("A factory name must be a non-empty string") if not inspect.isclass(factory): raise TypeError( "Invalid factory class '{0}'".format(type(factory).__name__) ) with self.__factories_lock: if factory_name in self.__factories: if override: _logger.info("Overriding factory '%s'", factory_name) else: raise ValueError( "'{0}' factory already exist".format(factory_name) ) self.__factories[factory_name] = factory self._fire_ipopo_event( constants.IPopoEvent.REGISTERED, factory_name )
Registers a component factory :param factory_name: The name of the factory :param factory: The factory class object :param override: If true, previous factory is overridden, else an exception is risen if a previous factory with that name already exists :raise ValueError: The factory name already exists or is invalid :raise TypeError: Invalid factory type
13,435
def update(self, parent=None): if not self.id: raise self.ResourceError() data = self.__class__._process_request( connection.patch, parent=parent, id=self.id, payload=self.payload() ) return self._reload(data)
Updates the resource. This will trigger an api PATCH request. :param parent ResourceBase: the parent of the resource - used for nesting the request url, optional :raises ResourceError: if the resource does not have an id (does not exist yet) :returns: the resource itself
13,436
def make_vcard_data(name, displayname, email=None, phone=None, fax=None, videophone=None, memo=None, nickname=None, birthday=None, url=None, pobox=None, street=None, city=None, region=None, zipcode=None, country=None, org=None, lat=None, lng=None, source=None, rev=None, title=None, photo_uri=None): def make_multifield(name, val): if val is None: return () if isinstance(val, str_type): val = (val,) return [.format(name, escape(i)) for i in val] escape = _escape_vcard data = [, , .format(name), .format(escape(displayname))] if org: data.append(.format(escape(org))) data.extend(make_multifield(, email)) data.extend(make_multifield(, phone)) data.extend(make_multifield(, fax)) data.extend(make_multifield(, videophone)) data.extend(make_multifield(, url)) data.extend(make_multifield(, title)) data.extend(make_multifield(, photo_uri)) if nickname: data.append(.format(escape(nickname))) adr_properties = (pobox, street, city, region, zipcode, country) if any(adr_properties): adr_data = [escape(i or ) for i in adr_properties] data.append(.format(*adr_data)) if birthday: try: birthday = birthday.strftime() except AttributeError: pass if not _looks_like_datetime(birthday): raise ValueError() data.append(.format(birthday)) if lat or lng and (not(all((lat, lng)))): raise ValueError() if lat and lng: data.append(.format(lat, lng)) if source: data.append(.format(escape(url))) if memo: data.append(.format(escape(memo))) if rev: if not _looks_like_datetime(rev): raise ValueError() data.append(.format(rev)) data.append() data.append() return .join(data)
Creates a string encoding the contact information as vCard 3.0. Only a subset of available vCard properties is supported. :param str name: The name. If it contains a semicolon (``;``), the first part is treated as lastname and the second part is treated as forename. :param str displayname: Common name. :param str|iterable email: E-mail address. Multiple values are allowed. :param str|iterable phone: Phone number. Multiple values are allowed. :param str|iterable fax: Fax number. Multiple values are allowed. :param str|iterable videophone: Phone number for video calls. Multiple values are allowed. :param str memo: A notice for the contact. :param str nickname: Nickname. :param str|date birthday: Birthday. If a string is provided, it should encode the date as YYYY-MM-DD value. :param str|iterable url: Homepage. Multiple values are allowed. :param str|None pobox: P.O. box (address information). :param str|None street: Street address. :param str|None city: City (address information). :param str|None region: Region (address information). :param str|None zipcode: Zip code (address information). :param str|None country: Country (address information). :param str org: Company / organization name. :param float lat: Latitude. :param float lng: Longitude. :param str source: URL where to obtain the vCard. :param str|date rev: Revision of the vCard / last modification date. :param str|iterable|None title: Job Title. Multiple values are allowed. :param str|iterable|None photo_uri: Photo URI. Multiple values are allowed. :rtype: str
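A hedged usage sketch of the helper documented above; all contact values are hypothetical:
vcard = make_vcard_data('Doe;John', 'John Doe', email='john.doe@example.org', phone='+1-555-0100', url='https://example.org')
# vcard now holds the BEGIN:VCARD ... END:VCARD text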
13,437
def detach_zone(organization_id_or_slug): organization = Organization.objects.get_by_id_or_slug( organization_id_or_slug) if not organization: exit_with_error( .format(organization_id_or_slug) ) log.info(.format( organization=organization)) organization.zone = None organization.save() log.info()
Detach the zone of a given <organization>.
13,438
def deleteoutputfile(project, filename, credentials=None): user, oauth_access_token = parsecredentials(credentials) if filename: filename = filename.replace("..","") if not filename or len(filename) == 0: Project.reset(project, user) msg = "Deleted" return withheaders(flask.make_response(msg), ,{:len(msg), : settings.ALLOW_ORIGIN}) elif os.path.isdir(Project.path(project, user) + filename): shutil.rmtree(Project.path(project, user) + filename) msg = "Deleted" return withheaders(flask.make_response(msg), ,{:len(msg), : settings.ALLOW_ORIGIN}) else: try: file = clam.common.data.CLAMOutputFile(Project.path(project, user), filename) except: raise flask.abort(404) success = file.delete() if not success: raise flask.abort(404) else: msg = "Deleted" return withheaders(flask.make_response(msg), ,{:len(msg), : settings.ALLOW_ORIGIN})
Delete an output file
13,439
def PackageVariable(key, help, default, searchfunc=None): help = .join( (help, % key)) return (key, help, default, lambda k, v, e: _validator(k,v,e,searchfunc), _converter)
The input parameters describe a 'package list' option, thus they are returned with the correct converter and validator appended. The result is usable for input to opts.Add() . A 'package list' option may either be 'all', 'none' or a list of package names (separated by space).
13,440
def score(self): if not self.scoreProperties: self.scoreProperties = self.getScoreProperties() return sum(self.scoreProperties.values())
Returns the sum of the accidental dignities score.
13,441
def parse(self, filename, verbose=0): run_completed, start_datetime, end_datetime = False, None, None filename = os.path.abspath(filename) report = EventReport(filename) w = WildCard("*Error|*Warning|*Comment|*Bug|*ERROR|*WARNING|*COMMENT|*BUG") import warnings warnings.simplefilter(, yaml.error.UnsafeLoaderWarning) with YamlTokenizer(filename) as tokens: for doc in tokens: if w.match(doc.tag): try: event = yaml.load(doc.text) except: message = "Malformatted YAML document at line: %d\n" % doc.lineno message += doc.text if verbose: message += "Traceback:\n %s" % straceback() if "error" in doc.tag.lower(): print("It seems an error. doc.tag:", doc.tag) event = AbinitYamlError(message=message, src_file=__file__, src_line=0) else: event = AbinitYamlWarning(message=message, src_file=__file__, src_line=0) event.lineno = doc.lineno report.append(event) if doc.tag == "!FinalSummary": run_completed = True d = doc.as_dict() start_datetime, end_datetime = d["start_datetime"], d["end_datetime"] report.set_run_completed(run_completed, start_datetime, end_datetime) return report
Parse the given file. Return :class:`EventReport`.
13,442
def delete_namespaced_custom_object(self, group, version, namespace, plural, name, body, **kwargs): kwargs['async_req'] = True if kwargs.get('async_req'): return self.delete_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, body, **kwargs) else: (data) = self.delete_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, body, **kwargs) return data
Deletes the specified namespace scoped custom object This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_custom_object(group, version, namespace, plural, name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str group: the custom resource's group (required) :param str version: the custom resource's version (required) :param str namespace: The custom resource's namespace (required) :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required) :param str name: the custom object's name (required) :param V1DeleteOptions body: (required) :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. :return: object If the method is called asynchronously, returns the request thread.
13,443
def run_iter(self, mine=False, jid=None): fstr = .format(self.opts[]) jid = self.returners[fstr](passed_jid=jid or self.opts.get(, None)) argv = self.opts[] if self.opts.get(, False): fun = args = argv else: fun = argv[0] if argv else args = argv[1:] job_load = { : jid, : self.tgt_type, : self.opts[], : self.opts[], : fun, : args, } if self.opts[] == : self.returners[.format(self.opts[])](jid, job_load, minions=self.targets.keys()) else: self.returners[.format(self.opts[])](jid, job_load) for ret in self.handle_ssh(mine=mine): host = next(six.iterkeys(ret)) self.cache_job(jid, host, ret[host], fun) if self.event: id_, data = next(six.iteritems(ret)) if isinstance(data, six.text_type): data = {: data} if not in data: data[] = id_ data[] = jid self.event.fire_event( data, salt.utils.event.tagify( [jid, , host], )) yield ret
Execute and yield returns as they come in, do not print to the display mine The Single objects will use mine_functions defined in the roster, pillar, or master config (they will be checked in that order) and will modify the argv with the arguments from mine_functions
13,444
def real_sound_match_abstract_sound(self, abstract_pos: AbstractPosition) -> bool: assert isinstance(abstract_pos, AbstractPosition) if self.before is not None and self.after is not None: return self.position == abstract_pos.position and self.before.match_list(abstract_pos.before) and \ self.after.match_list(abstract_pos.after) elif self.before is None and self.after is None: return self.position == abstract_pos.position elif self.before is None: return self.position == abstract_pos.position and self.after.match_list(abstract_pos.after) else: return self.position == abstract_pos.position and self.before.match_list(abstract_pos.before)
If an observed position :param abstract_pos: :return:
13,445
def add(self, child): if isinstance(child, Parameter): self.add_parameter(child) elif isinstance(child, Property): self.add_property(child) elif isinstance(child, DerivedParameter): self.add_derived_parameter(child) elif isinstance(child, IndexParameter): self.add_index_parameter(child) elif isinstance(child, Constant): self.add_constant(child) elif isinstance(child, Exposure): self.add_exposure(child) elif isinstance(child, Requirement): self.add_requirement(child) elif isinstance(child, ComponentRequirement): self.add_component_requirement(child) elif isinstance(child, InstanceRequirement): self.add_instance_requirement(child) elif isinstance(child, Children): self.add_children(child) elif isinstance(child, Text): self.add_text(child) elif isinstance(child, Link): self.add_link(child) elif isinstance(child, Path): self.add_path(child) elif isinstance(child, EventPort): self.add_event_port(child) elif isinstance(child, ComponentReference): self.add_component_reference(child) elif isinstance(child, Attachments): self.add_attachments(child) else: raise ModelError()
Adds a typed child object to the component type. @param child: Child object to be added.
13,446
def _reportFutures(self): try: while True: time.sleep(scoop.TIME_BETWEEN_STATUS_REPORTS) fids = set(x.id for x in scoop._control.execQueue.movable) fids.update(set(x.id for x in scoop._control.execQueue.ready)) fids.update(set(x.id for x in scoop._control.execQueue.inprogress)) self.socket.send_multipart([ STATUS_UPDATE, pickle.dumps(fids), ]) except AttributeError: pass
Sends futures status updates to broker at intervals of scoop.TIME_BETWEEN_STATUS_REPORTS seconds. Is intended to be run by a separate thread.
13,447
def thumbnail(self): response = GettRequest().get("/files/%s/%s/blob/thumb" % (self.sharename, self.fileid)) return response.response
This method returns a thumbnail representation of the file if the data is a supported graphics format. Input: * None Output: * A byte stream representing a thumbnail of a supported graphics file Example:: file = client.get_file("4ddfds", 0) open("thumbnail.jpg", "wb").write(file.thumbnail())
13,448
def ui_device_label(self): return .join(filter(None, [ self.ui_device_presentation, self.loop_file or self.drive_label or self.ui_id_label or self.ui_id_uuid ]))
UI string identifying the device (drive) if toplevel.
13,449
def detect(self): if self.opts_url and self.opts_parser: url = self.opts_url parser = self.opts_parser else: url, parser = choice(self.urls) parser = globals().get("_parser_" + parser) theip = _get_ip_from_url(url, parser) if theip is None: LOG.info("Could not detect IP using webcheck! Offline?") self.set_current_value(theip) return theip
Try to contact a remote webservice and parse the returned output. Determine the IP address from the parsed output and return.
13,450
def filter_select_columns_intensity(df, prefix, columns): return df.filter(regex= % (prefix, .join(columns)) )
Filter dataframe to include specified columns, retaining any Intensity columns.
13,451
def query(url, **kwargs): opts = __opts__.copy() if 'opts' in kwargs: opts.update(kwargs['opts']) del kwargs['opts'] return salt.utils.http.query(url=url, opts=opts, **kwargs)
Query a resource, and decode the return data Passes through all the parameters described in the :py:func:`utils.http.query function <salt.utils.http.query>`: .. autofunction:: salt.utils.http.query CLI Example: .. code-block:: bash salt '*' http.query http://somelink.com/ salt '*' http.query http://somelink.com/ method=POST \ params='key1=val1&key2=val2' salt '*' http.query http://somelink.com/ method=POST \ data='<xml>somecontent</xml>' For more information about the ``http.query`` module, refer to the :ref:`HTTP Tutorial <tutorial-http>`.
13,452
def errdp(marker, number): marker = stypes.stringToCharP(marker) number = ctypes.c_double(number) libspice.errdp_c(marker, number)
Substitute a double precision number for the first occurrence of a marker found in the current long error message. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/errdp_c.html :param marker: A substring of the error message to be replaced. :type marker: str :param number: The d.p. number to substitute for marker. :type number: float
13,453
def create(image_data): env_variables = [] for key, value in image_data.env_variables.items(): env_variables.append(client.V1EnvVar(name=key, value=value)) exposed_ports = [] if image_data.exposed_ports is not None: for port in image_data.exposed_ports: splits = port.split("/", 1) port = int(splits[0]) protocol = splits[1].upper() if len(splits) > 1 else None exposed_ports.append(client.V1ContainerPort(container_port=port, protocol=protocol)) image_name = image_data.name.split("/")[-1].split(":")[0] random_string = .join( random.choice(string.ascii_lowercase + string.digits) for _ in range(4)) container_name = .format( image_name=image_name, user_name=getpass.getuser(), random_string=random_string) container = client.V1Container(command=image_data.command, env=env_variables, image=image_data.name, name=container_name, ports=exposed_ports) pod_metadata = client.V1ObjectMeta(name=container_name + "-pod") pod_spec = client.V1PodSpec(containers=[container]) pod = client.V1Pod(spec=pod_spec, metadata=pod_metadata) return pod
:param image_data: ImageMetadata :return: V1Pod, https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Pod.md
13,454
def _decode_names(self): if self.subject_name is not None: subject_name = [] for part in self.subject_name: new_part = [] for name, value in part: try: name = name.decode("utf-8") value = value.decode("utf-8") except UnicodeError: continue new_part.append((name, value)) subject_name.append(tuple(new_part)) self.subject_name = tuple(subject_name) for key, old in self.alt_names.items(): new = [] for name in old: try: name = name.decode("utf-8") except UnicodeError: continue new.append(name) self.alt_names[key] = new
Decode names (hopefully ASCII or UTF-8) into Unicode.
13,455
def get_lock_behaviour(triggers, all_data, lock): updates = {} lock_key = config._forward_aliases.get(Constants.VERSION_LOCK_FIELD) if lock: updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_LOCK_VALUE elif ( triggers and lock_key and str(all_data.get(lock_key)) == str(config.VERSION_LOCK_VALUE) ): triggers.clear() updates[Constants.VERSION_LOCK_FIELD] = config.VERSION_UNLOCK_VALUE return updates
Binary state lock protects from version increments if set
13,456
def _subprocessor(self, disabled_qubits): edgelist = [(p, q) for p, q in self._edgelist if p not in disabled_qubits and q not in disabled_qubits] return eden_processor(edgelist, self.M, self.N, self.L, random_bundles=self._random_bundles)
Create a subprocessor by deleting a set of qubits. We assume this removes all evil edges, and return an :class:`eden_processor` instance.
13,457
def fast_cov(x, y=None, destination=None): validate_inputs(x, y, destination) if y is None: y = x if destination is None: destination = numpy.zeros((x.shape[1], y.shape[1])) mean_x = numpy.mean(x, axis=0) mean_y = numpy.mean(y, axis=0) mean_centered_x = (x - mean_x).astype(destination.dtype) mean_centered_y = (y - mean_y).astype(destination.dtype) numpy.dot(mean_centered_x.T, mean_centered_y, out=destination) numpy.divide(destination, (x.shape[0] - 1), out=destination) return destination
calculate the covariance matrix for the columns of x (MxN), or optionally, the covariance matrix between the columns of x and the columns of y (MxP). (In the language of statistics, the columns are variables, the rows are observations). Args: x (numpy array-like) MxN in shape y (numpy array-like) MxP in shape destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy memmap of a file) returns (numpy array-like) array of the covariance values for defaults (y=None), shape is NxN if y is provided, shape is NxP
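A small sanity-check sketch for the default case (y=None); the random data is illustrative, and numpy.cov with rowvar=False computes the same column covariance:
import numpy
x = numpy.random.rand(100, 3)      # 100 observations, 3 variables
c = fast_cov(x)                    # 3x3 covariance of the columns of x
assert numpy.allclose(c, numpy.cov(x, rowvar=False))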
13,458
def _to_dict(self): _dict = {} if hasattr(self, 'consumption_preference_category_id') and self.consumption_preference_category_id is not None: _dict['consumption_preference_category_id'] = self.consumption_preference_category_id if hasattr(self, 'name') and self.name is not None: _dict['name'] = self.name if hasattr(self, 'consumption_preferences') and self.consumption_preferences is not None: _dict['consumption_preferences'] = [ x._to_dict() for x in self.consumption_preferences ] return _dict
Return a json dictionary representing this model.
13,459
def CELERY_RESULT_BACKEND(self): configured = get('CELERY_RESULT_BACKEND', None) if configured: return configured if not self._redis_available(): return None host, port = self.REDIS_HOST, self.REDIS_PORT if host and port: default = "redis://{host}:{port}/{db}".format( host=host, port=port, db=self.CELERY_REDIS_RESULT_DB) return default
Redis result backend config
13,460
def main(): app = QApplication(sys.argv) rlbot_icon = QtGui.QIcon(os.path.join(get_rlbot_directory(), , )) app.setWindowIcon(rlbot_icon) window = RLBotQTGui() window.show() app.exec_()
Start the GUI :return:
13,461
def fc(inputs, num_units_out, activation=tf.nn.relu, stddev=0.01, bias=0.0, weight_decay=0, batch_norm_params=None, is_training=True, trainable=True, restore=True, scope=None, reuse=None): with tf.variable_scope(scope, 'FC', [inputs], reuse=reuse): num_units_in = inputs.get_shape()[1] weights_shape = [num_units_in, num_units_out] weights_initializer = tf.truncated_normal_initializer(stddev=stddev) l2_regularizer = None if weight_decay and weight_decay > 0: l2_regularizer = losses.l2_regularizer(weight_decay) weights = variables.variable('weights', shape=weights_shape, initializer=weights_initializer, regularizer=l2_regularizer, trainable=trainable, restore=restore) if batch_norm_params is not None: outputs = tf.matmul(inputs, weights) with scopes.arg_scope([batch_norm], is_training=is_training, trainable=trainable, restore=restore): outputs = batch_norm(outputs, **batch_norm_params) else: bias_shape = [num_units_out,] bias_initializer = tf.constant_initializer(bias) biases = variables.variable('biases', shape=bias_shape, initializer=bias_initializer, trainable=trainable, restore=restore) outputs = tf.nn.xw_plus_b(inputs, weights, biases) if activation: outputs = activation(outputs) return outputs
Adds a fully connected layer followed by an optional batch_norm layer. FC creates a variable called 'weights', representing the fully connected weight matrix, that is multiplied by the input. If `batch_norm` is None, a second variable called 'biases' is added to the result of the initial vector-matrix multiplication. Args: inputs: a [B x N] tensor where B is the batch size and N is the number of input units in the layer. num_units_out: the number of output units in the layer. activation: activation function. stddev: the standard deviation for the weights. bias: the initial value of the biases. weight_decay: the weight decay. batch_norm_params: parameters for the batch_norm. If is None don't use it. is_training: whether or not the model is in training mode. trainable: whether or not the variables should be trainable or not. restore: whether or not the variables should be marked for restore. scope: Optional scope for variable_scope. reuse: whether or not the layer and its variables should be reused. To be able to reuse the layer scope must be given. Returns: the tensor variable representing the result of the series of operations.
13,462
async def delete_tag(self, tag): path = '/api/tags/{tag}.{ext}'.format(tag=tag, ext=self.format) params = {'access_token': self.token} return await self.query(path, "delete", **params)
DELETE /api/tags/{tag}.{_format} Permanently remove one tag from every entry :param tag: string The Tag :return data related to the ext
13,463
def all_host_infos(): output = [] output.append(["Operating system", os()]) output.append(["CPUID information", cpu()]) output.append(["CC information", compiler()]) output.append(["JDK information", from_cmd("java -version")]) output.append(["MPI information", from_cmd("mpirun -version")]) output.append(["Scala information", from_cmd("scala -version")]) output.append(["OpenCL headers", from_cmd( "find /usr/include|grep opencl.h")]) output.append(["OpenCL libraries", from_cmd( "find /usr/lib/ -iname ")]) output.append(["NVidia SMI", from_cmd("nvidia-smi -q")]) output.append(["OpenCL Details", opencl()]) return output
Summarize all host information.
13,464
def invalidate(self, comparison: Comparison[Entity, Entity]) -> None: @backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_tries=5, giveup=lambda e: 400 <= e.response.status_code < 500) def _request(chunk: List[str]) -> requests.Response: response = self._session.delete( f, headers={ : self._email, : self._key }, json={ : [self._prefix + path for path in chunk] }) response.raise_for_status() return response paths = itertools.chain(comparison.deleted(), comparison.modified()) for chunk_ in util.chunk(paths, self._MAX_INVALIDATIONS_PER_REQUEST): chunk_ = list(chunk_) if not chunk_: return logger.info(, len(chunk_), .join(chunk_)) response_ = _request(chunk_) logger.debug(, response_.status_code, response_.text) json_ = response_.json() if not json_[]: raise RuntimeError() logger.info(, json_[][])
Invalidate paths in a zone. See https://api.cloudflare.com /#zone-purge-individual-files-by-url-and-cache-tags :param comparison: The comparison whose changes to invalidate. :raises requests.exceptions.RequestException: On request failure. :raises RuntimeError: If the request succeeded but could not be carried out.
13,465
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None): with tf.name_scope(scope, 'MaxPool', [inputs]): kernel_h, kernel_w = _two_element_tuple(kernel_size) stride_h, stride_w = _two_element_tuple(stride) return tf.nn.max_pool(inputs, ksize=[1, kernel_h, kernel_w, 1], strides=[1, stride_h, stride_w, 1], padding=padding)
Adds a Max Pooling layer. It is assumed by the wrapper that the pooling is only done per image and not in depth or batch. Args: inputs: a tensor of size [batch_size, height, width, depth]. kernel_size: a list of length 2: [kernel_height, kernel_width] of the pooling kernel over which the op is computed. Can be an int if both values are the same. stride: a list of length 2: [stride_height, stride_width]. Can be an int if both strides are the same. Note that presently both strides must have the same value. padding: the padding method, either 'VALID' or 'SAME'. scope: Optional scope for name_scope. Returns: a tensor representing the results of the pooling operation. Raises: ValueError: if 'kernel_size' is not a 2-D list
13,466
def colorize(text, color=None, **kwargs): style = None bg = None if in kwargs: if kwargs[] not in STYLE: raise WrongStyle(.format(kwargs[], )) style = kwargs[] if in kwargs: if kwargs[] not in BACKGROUND: raise WrongBackground(.format(kwargs[], )) bg = kwargs[] if color not in COLOR: raise WrongColor(.format(color, )) if not in text: text = + .join([str(STYLE[style]), str(COLOR[color]), str(BACKGROUND[bg])])\ + + text + else: lst = text.split() text = for x in lst: if not x.startswith(): x = + .join([str(STYLE[style]), str(COLOR[color]), str(BACKGROUND[bg])])\ + + x + else: x += text += x return text
Colorize the text kwargs arguments: style=, bg=
13,467
def set_wts_get_npred_wt(gta, maskname): if is_null(maskname): maskname = None gta.set_weights_map(maskname) for name in gta.like.sourceNames(): gta._init_source(name) gta._update_roi() return build_srcdict(gta, )
Set a weights file and get the weighted npred for all the sources Parameters ---------- gta : `fermipy.GTAnalysis` The analysis object maskname : str The path to the file with the mask Returns ------- odict : dict Dictionary mapping from source name to weighted npred
13,468
def rule_low_registers(self, arg): r_num = self.check_register(arg) if r_num > 7: raise iarm.exceptions.RuleError( "Register {} is not a low register".format(arg))
Low registers are R0 - R7
13,469
def _set_data(self, **kwargs): if "shape" in kwargs: self.shape = kwargs["shape"] if "grid" in kwargs: self.dict_grid.clear() self.dict_grid.update(kwargs["grid"]) if "attributes" in kwargs: self.attributes[:] = kwargs["attributes"] if "row_heights" in kwargs: self.row_heights = kwargs["row_heights"] if "col_widths" in kwargs: self.col_widths = kwargs["col_widths"] if "macros" in kwargs: self.macros = kwargs["macros"]
Sets data from given parameters Old values are deleted. If a parameter is not given, nothing is changed. Parameters ---------- shape: 3-tuple of Integer \tGrid shape grid: Dict of 3-tuples to strings \tCell content attributes: List of 3-tuples \tCell attributes row_heights: Dict of 2-tuples to float \t(row, tab): row_height col_widths: Dict of 2-tuples to float \t(col, tab): col_width macros: String \tMacros from macro list
13,470
def _extract_optimizer_param_name_and_group(optimizer_name, param): pat_1 = pat_2 = pat_1 = optimizer_name + pat_1 pat_2 = optimizer_name + pat_2 match_1 = re.compile(pat_1).fullmatch(param) match_2 = re.compile(pat_2).fullmatch(param) match = match_1 or match_2 if not match: raise AttributeError(.format( param, optimizer_name, )) groups = match.groupdict() param_group = groups.get(, ) param_name = groups[] return param_group, param_name
Extract param group and param name from the given parameter name. Raises an error if the param name doesn't match one of - ``optimizer__param_groups__<group>__<name>`` - ``optimizer__<name>`` In the second case group defaults to 'all'. The second case explicitly forbids ``optimizer__foo__bar`` since we do not know how to deal with unknown sub-params.
13,471
def get_knowledge_category(self): if not self.has_knowledge_category(): raise IllegalState() else: return Grade(self._get_grade_map(self._my_map[])),
Gets the grade associated with the knowledge dimension. return: (osid.grading.Grade) - the grade raise: IllegalState - has_knowledge_category() is false raise: OperationFailed - unable to complete request compliance: mandatory - This method must be implemented.
13,472
def string(self, *args, **kwargs): compare = String(*args, **kwargs) self.add(compare) return self
Compare attributes of pairs with string algorithm. Shortcut of :class:`recordlinkage.compare.String`:: from recordlinkage.compare import String indexer = recordlinkage.Compare() indexer.add(String())
13,473
def autocomplete(): if not in os.environ: return cwords = os.environ[].split()[1:] cword = int(os.environ[]) try: current = cwords[cword - 1] except IndexError: current = subcommands = [cmd for cmd, summary in get_summaries()] options = [] try: subcommand_name = [w for w in cwords if w in subcommands][0] except IndexError: subcommand_name = None parser = create_main_parser() if subcommand_name: if subcommand_name == : sys.exit(1) should_list_installed = ( subcommand_name in [, ] and not current.startswith() ) if should_list_installed: installed = [] lc = current.lower() for dist in get_installed_distributions(local_only=True): if dist.key.startswith(lc) and dist.key not in cwords[1:]: installed.append(dist.key) if installed: for dist in installed: print(dist) sys.exit(1) subcommand = commands_dict[subcommand_name]() for opt in subcommand.parser.option_list_all: if opt.help != optparse.SUPPRESS_HELP: for opt_str in opt._long_opts + opt._short_opts: options.append((opt_str, opt.nargs)) prev_opts = [x.split()[0] for x in cwords[1:cword - 1]] options = [(x, v) for (x, v) in options if x not in prev_opts] options = [(k, v) for k, v in options if k.startswith(current)] completion_type = get_path_completion_type( cwords, cword, subcommand.parser.option_list_all, ) if completion_type: options = auto_complete_paths(current, completion_type) options = ((opt, 0) for opt in options) for option in options: opt_label = option[0] if option[1] and option[0][:2] == "--": opt_label += print(opt_label) else: opts = [i.option_list for i in parser.option_groups] opts.append(parser.option_list) opts = (o for it in opts for o in it) if current.startswith(): for opt in opts: if opt.help != optparse.SUPPRESS_HELP: subcommands += opt._long_opts + opt._short_opts else: completion_type = get_path_completion_type(cwords, cword, opts) if completion_type: subcommands = auto_complete_paths(current, completion_type) print(.join([x for x in subcommands if x.startswith(current)])) sys.exit(1)
Entry Point for completion of main and subcommand options.
13,474
def set_attribute(self, code, value): attr, _ = self.get_or_create(code=code) attr.value = value attr.save()
Set attribute for user
13,475
def _cbc_encrypt(self, content, final_key): aes = AES.new(final_key, AES.MODE_CBC, self._enc_iv) padding = (16 - len(content) % AES.block_size) for _ in range(padding): content += chr(padding).encode() temp = bytes(content) return aes.encrypt(temp)
This method encrypts the content.
13,476
def add_vrf(self, auth, attr): self._logger.debug("add_vrf called; attr: %s" % unicode(attr)) req_attr = [ , ] self._check_attr(attr, req_attr, _vrf_attrs) insert, params = self._sql_expand_insert(attr) sql = "INSERT INTO ip_net_vrf " + insert self._execute(sql, params) vrf_id = self._lastrowid() vrf = self.list_vrf(auth, { : vrf_id })[0] audit_params = { : vrf[], : vrf[], : vrf[], : auth.username, : auth.authenticated_as, : auth.full_name, : auth.authoritative_source, : % (vrf[], unicode(vrf)) } sql, params = self._sql_expand_insert(audit_params) self._execute( % sql, params) return vrf
Add a new VRF. * `auth` [BaseAuth] AAA options. * `attr` [vrf_attr] The new VRF's attributes. Add a VRF based on the values stored in the `attr` dict. Returns a dict describing the VRF which was added. This is the documentation of the internal backend function. It's exposed over XML-RPC, please also see the XML-RPC documentation for :py:func:`nipap.xmlrpc.NipapXMLRPC.add_vrf` for full understanding.
13,477
def _init_go_sources(self, go_sources_arg, go2obj_arg): gos_user = set(go_sources_arg) if in self.kws and self.kws[]: gos_user |= get_leaf_children(gos_user, go2obj_arg) gos_godag = set(go2obj_arg) gos_source = gos_user.intersection(gos_godag) gos_missing = gos_user.difference(gos_godag) if not gos_missing: return gos_source sys.stdout.write("{N} GO IDs NOT FOUND IN GO DAG: {GOs}\n".format( N=len(gos_missing), GOs=" ".join([str(e) for e in gos_missing]))) return gos_source
Return GO sources which are present in GODag.
13,478
def scroll(self, scroll_id=None, body=None, params=None): if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH: raise ValueError("You need to supply scroll_id or body.") elif scroll_id and not body: body = {"scroll_id": scroll_id} elif scroll_id: params["scroll_id"] = scroll_id return self.transport.perform_request( "GET", "/_search/scroll", params=params, body=body )
Scroll a search request created by specifying the scroll parameter. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_ :arg scroll_id: The scroll ID :arg body: The scroll ID if not passed by URL or query parameter. :arg scroll: Specify how long a consistent view of the index should be maintained for scrolled search :arg rest_total_hits_as_int: This parameter is used to restore the total hits as a number in the response. This param is added version 6.x to handle mixed cluster queries where nodes are in multiple versions (7.0 and 6.latest)
13,479
def target_lines(self): target_lines = self._target_source_lines[:] deps_begin, deps_end = self._dependencies_interval target_lines[deps_begin:deps_end] = self.dependency_lines() return target_lines
The formatted target_type(...) lines for this target. This is just a convenience method for extracting and re-injecting the changed `dependency_lines` into the target text.
13,480
def _stream_helper(self, response, decode=False):
    if response.raw._fp.chunked:
        if decode:
            for chunk in json_stream(self._stream_helper(response, False)):
                yield chunk
        else:
            reader = response.raw
            while not reader.closed:
                # this read call will block until we get a chunk
                data = reader.read(1)
                if not data:
                    break
                if reader._fp.chunk_left:
                    data += reader.read(reader._fp.chunk_left)
                yield data
    else:
        yield self._result(response, json=decode)
Generator for data coming from a chunked-encoded HTTP response.
13,481
def powered_off(name, connection=None, username=None, password=None):
    return _virt_call(name, 'stop', 'unpowered',
                      'Machine has been powered off',
                      connection=connection,
                      username=username,
                      password=password)
Stops a VM by power off.

.. versionadded:: 2016.3.0

:param connection: libvirt connection URI, overriding defaults

    .. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults

    .. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults

    .. versionadded:: 2019.2.0

.. code-block:: yaml

    domain_name:
      virt.powered_off
13,482
def get_unknown_check_result_brok(cmd_line):
    # expected format:
    # [timestamp] PROCESS_SERVICE_CHECK_RESULT;host;service;return_code;output|perfdata
    match = re.match(
        r'^\[([0-9]{10})] PROCESS_(SERVICE)_CHECK_RESULT;'
        r'([^;]*);([^;]*);([^;]*);([^|]*)(?:\|(.*))?', cmd_line)
    if not match:
        # [timestamp] PROCESS_HOST_CHECK_RESULT;host;return_code;output|perfdata
        match = re.match(
            r'^\[([0-9]{10})] PROCESS_(HOST)_CHECK_RESULT;'
            r'([^;]*);([^;]*);([^|]*)(?:\|(.*))?', cmd_line)
    if not match:
        return None

    data = {
        'time_stamp': int(match.group(1)),
        'host_name': match.group(3),
    }

    if match.group(2) == 'SERVICE':
        data['service_description'] = match.group(4)
        data['return_code'] = match.group(5)
        data['output'] = match.group(6)
        data['perf_data'] = match.group(7)
    else:
        data['return_code'] = match.group(4)
        data['output'] = match.group(5)
        data['perf_data'] = match.group(6)

    return Brok({'type': 'unknown_%s_check_result' % match.group(2).lower(),
                 'data': data})
Create unknown check result brok and fill it with command data :param cmd_line: command line to extract data :type cmd_line: str :return: unknown check result brok :rtype: alignak.objects.brok.Brok
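An illustrative call using the standard Nagios-style external command syntax; timestamp, host, service, return code, and output are made up:

cmd = '[1534269084] PROCESS_SERVICE_CHECK_RESULT;srv01;disk;0;DISK OK|used=42%'
brok = get_unknown_check_result_brok(cmd)
# the brok data carries the parsed host name, service description, return code and output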
13,483
def extract_response(self, extractors):
    if not extractors:
        return {}

    logger.log_debug("start to extract from response object.")
    extracted_variables_mapping = OrderedDict()
    extract_binds_order_dict = utils.ensure_mapping_format(extractors)

    for key, field in extract_binds_order_dict.items():
        extracted_variables_mapping[key] = self.extract_field(field)

    return extracted_variables_mapping
extract value from requests.Response and store in OrderedDict. Args: extractors (list): [ {"resp_status_code": "status_code"}, {"resp_headers_content_type": "headers.content-type"}, {"resp_content": "content"}, {"resp_content_person_first_name": "content.person.name.first_name"} ] Returns: OrderDict: variable binds ordered dict
13,484
def center(self) -> Location:
    top = self.top()
    center_z = top.point.z - (self._depth / 2.0)
    return Location(Point(x=top.point.x, y=top.point.y, z=center_z), self)
:return: a Location whose point is the absolute position of the center of the well relative to the deck (with the front-left corner of slot 1 as (0,0,0))
13,485
def get_url(self, action, obj=None, domain=True):
    if not obj:
        # 'actstream_detail' / 'actstream_actor' route names assumed from the app's URL conf
        url = reverse('actstream_detail', None, (action.pk,))
    elif hasattr(obj, 'get_absolute_url'):
        url = obj.get_absolute_url()
    else:
        ctype = ContentType.objects.get_for_model(obj)
        url = reverse('actstream_actor', None, (ctype.pk, obj.pk))
    if domain:
        return add_domain(Site.objects.get_current().domain, url)
    return url
Returns an RFC3987 IRI for an HTML representation of the given object, action. If domain is true, the current site's domain will be added.
13,486
def project_activity(index, start, end):
    results = {
        "metrics": [SubmittedPRs(index, start, end),
                    ClosedPRs(index, start, end)]
    }
    return results
Compute the metrics for the project activity section of the enriched github pull requests index. Returns a dictionary containing a "metric" key. This key contains the metrics for this section. :param index: index object :param start: start date to get the data from :param end: end date to get the data up to :return: dictionary with the value of the metrics
13,487
def idd2grouplist(fhandle):
    try:
        txt = fhandle.read()
        return iddtxt2grouplist(txt)
    except AttributeError as e:
        # fhandle is a file path rather than an open file object
        txt = open(fhandle, 'r').read()
        return iddtxt2grouplist(txt)
wrapper for iddtxt2grouplist
13,488
def get_jid(jid):
    cb_ = _get_connection()
    _verify_views()

    ret = {}

    # view name assumed; the design document's view keyed on jid with return docs
    for result in cb_.query(DESIGN_NAME, 'jid_returns',
                            key=six.text_type(jid), include_docs=True):
        ret[result.value] = result.doc.value

    return ret
Return the information returned when the specified job id was executed
13,489
def _is_small_molecule(pe):
    val = isinstance(pe, _bp('SmallMolecule')) or \
          isinstance(pe, _bpimpl('SmallMolecule')) or \
          isinstance(pe, _bp('SmallMoleculeReference')) or \
          isinstance(pe, _bpimpl('SmallMoleculeReference'))
    return val
Return True if the element is a small molecule
13,490
def _proxy(self):
    if self._context is None:
        self._context = WorkflowContext(
            self._version,
            workspace_sid=self._solution['workspace_sid'],
            sid=self._solution['sid'],
        )
    return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: WorkflowContext for this WorkflowInstance :rtype: twilio.rest.taskrouter.v1.workspace.workflow.WorkflowContext
13,491
def validate_capacity(capacity):
    if capacity not in VALID_SCALING_CONFIGURATION_CAPACITIES:
        raise ValueError(
            "ScalingConfiguration capacity must be one of: {}".format(
                ", ".join(map(str, VALID_SCALING_CONFIGURATION_CAPACITIES))
            )
        )
    return capacity
Validate ScalingConfiguration capacity for serverless DBCluster
13,492
def libvlc_media_get_duration(p_md):
    f = _Cfunctions.get('libvlc_media_get_duration', None) or \
        _Cfunction('libvlc_media_get_duration', ((1,),), None,
                   ctypes.c_longlong, Media)
    return f(p_md)
Get duration (in ms) of media descriptor object item. @param p_md: media descriptor object. @return: duration of media item or -1 on error.
13,493
def _chunk_filter(self, extensions):
    if isinstance(extensions, six.string_types):
        extensions = extensions.split()

    def _filter(chunk):
        name = chunk['name']
        if extensions is not None:
            if not any(name.endswith(e) for e in extensions):
                return False
        for pattern in self.state.ignore_re:
            if pattern.match(name):
                return False
        for pattern in self.state.ignore:
            if fnmatch.fnmatchcase(name, pattern):
                return False
        return True

    return _filter
Create a filter from the extensions and ignore files
13,494
def _set_show_support_save_status(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=show_support_save_status.show_support_save_status,
            is_leaf=True,
            yang_name="show-support-save-status",
            rest_name="show-support-save-status",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=False,
            # tailf extension values below are representative, not verbatim
            extensions={u'tailf-common': {u'hidden': u'rpccmd',
                                          u'actionpoint': u'show-support-save-status-action-point'}},
            namespace='urn:brocade.com:mgmt:brocade-ras-ext',
            defining_module='brocade-ras-ext',
            yang_type='rpc',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """show_support_save_status must be of a type compatible with rpc""",
            'defined-type': "rpc",
            'generated-type': """YANGDynClass(base=show_support_save_status.show_support_save_status,
                is_leaf=True, yang_name="show-support-save-status", rest_name="show-support-save-status",
                parent=self, path_helper=self._path_helper, extmethods=self._extmethods,
                register_paths=False, namespace='urn:brocade.com:mgmt:brocade-ras-ext',
                defining_module='brocade-ras-ext', yang_type='rpc', is_config=True)""",
        })

    self.__show_support_save_status = t
    if hasattr(self, '_set'):
        self._set()
Setter method for show_support_save_status, mapped from YANG variable /brocade_ras_ext_rpc/show_support_save_status (rpc) If this variable is read-only (config: false) in the source YANG file, then _set_show_support_save_status is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_show_support_save_status() directly. YANG Description: Information on the status of recent support save request
13,495
def interpret_expenditure_entry(entry):
    try:
        expenditure_amount = float(entry['ExpenditureAmount'])
        entry['AmountsInterpreted'] = True
        entry['ExpenditureAmount'] = expenditure_amount
    except ValueError:
        entry['AmountsInterpreted'] = False

    try:
        expenditure_date = parse_iso_str(entry['ExpenditureDate'])
        filed_date = parse_iso_str(entry['FiledDate'])
        entry['DatesInterpreted'] = True
        entry['ExpenditureDate'] = expenditure_date
        entry['FiledDate'] = filed_date
    except ValueError:
        entry['DatesInterpreted'] = False

    try:
        amended = parse_yes_no_str(entry['Amended'])
        amendment = parse_yes_no_str(entry['Amendment'])
        entry['BooleanFieldsInterpreted'] = True
        entry['Amended'] = amended
        entry['Amendment'] = amendment
    except ValueError:
        entry['BooleanFieldsInterpreted'] = False

    return entry
Interpret data fields within a CO-TRACER expenditure report. Interpret the expenditure amount, expenditure date, filed date, amended, and amendment fields of the provided entry. All dates (expenditure and filed) are interpreted together and, if any fails, all will retain their original value. Likewise, amended and amendment are interpreted together and if one is malformed, both will retain their original value. Entry may be edited in place and side-effects are possible in coupled code. However, client code should use the return value to guard against future changes. A value with the key 'AmountsInterpreted' will be set to True or False in the returned entry if floating point values are successfully interpreted (ExpenditureAmount) or not respectively. A value with the key 'DatesInterpreted' will be set to True or False in the returned entry if ISO 8601 strings are successfully interpreted (ExpenditureDate and FiledDate) or not respectively. A value with the key 'BooleanFieldsInterpreted' will be set to True or False in the returned entry if boolean strings are successfully interpreted (Amended and Amendment) or not respectively. @param entry: The expenditure report data to manipulate / interpret. @type entry: dict @return: The entry passed @raise ValueError: Raised if any expected field cannot be found in entry.
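A small worked example, assuming the TRACER field names documented above; the field values are illustrative:

entry = {
    'ExpenditureAmount': '150.00',
    'ExpenditureDate': '2013-05-04T00:00:00',
    'FiledDate': '2013-05-10T00:00:00',
    'Amended': 'N',
    'Amendment': 'N',
}
entry = interpret_expenditure_entry(entry)
# entry['AmountsInterpreted'] is True and entry['ExpenditureAmount'] == 150.0;
# a malformed date would instead leave both date fields unchanged and set
# entry['DatesInterpreted'] to False.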
13,496
def get_partitioned_view_result(self, partition_key, ddoc_id, view_name,
                                raw_result=False, **kwargs):
    ddoc = DesignDocument(self, ddoc_id)
    view = View(ddoc, view_name, partition_key=partition_key)
    return self._get_view_result(view, raw_result, **kwargs)
Retrieves the partitioned view result based on the design document and view name. See :func:`~cloudant.database.CouchDatabase.get_view_result` method for further details. :param str partition_key: Partition key. :param str ddoc_id: Design document id used to get result. :param str view_name: Name of the view used to get result. :param bool raw_result: Dictates whether the view result is returned as a default Result object or a raw JSON response. Defaults to False. :param kwargs: See :func:`~cloudant.database.CouchDatabase.get_view_result` method for available keyword arguments. :returns: The result content either wrapped in a QueryResult or as the raw response JSON content. :rtype: QueryResult, dict
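A usage sketch; the database object, partition key, design document id, and view name are illustrative:

result = db.get_partitioned_view_result('north-america', '_design/sales',
                                        'by-month', limit=10)
for row in result:
    print(row['key'], row['value'])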
13,497
def main():
    # load connection settings
    conn = symphony.Config()
    # connect to symphony
    try:
        agent, pod, symphony_sid = conn.connect()
        print('connected: %s' % (symphony_sid))
    except Exception as err:
        print('failed to connect: %s' % (err))
    # build and send a test message (format string and body are illustrative)
    msgFormat = 'MESSAGEML'
    message = '<messageML>hello world</messageML>'
    try:
        status_code, retstring = agent.send_message(symphony_sid, msgFormat, message)
        print("%s: %s" % (status_code, retstring))
    except Exception as err:
        print(retstring, err)
main program loop
13,498
async def install_sandboxed_update(filename, loop):
    log.debug("Creating virtual environment")
    venv_dir, python, venv_site_pkgs \
        = await create_virtual_environment(loop=loop)
    log.debug("Installing update server into virtual environment")
    out, err, returncode = await _install(python, filename, loop)
    if err or returncode != 0:
        log.error("Install failed: {}".format(err))
        # result dict keys assumed ('status'/'error'); original literals were stripped
        res = {'status': 'failure', 'error': err}
    else:
        log.debug("Install successful")
        res = {'status': 'success'}
    return res, python, venv_site_pkgs, venv_dir
Create a virtual environment and activate it, and then install an update candidate (leaves virtual environment activated) :return: a result dict and the path to python in the virtual environment
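A hedged driver snippet showing how the coroutine might be awaited; the wheel filename is illustrative:

import asyncio

loop = asyncio.get_event_loop()
res, python, site_pkgs, venv_dir = loop.run_until_complete(
    install_sandboxed_update('/tmp/update-server-1.0.0.whl', loop))
print(res, python)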
13,499
def procs():
    ret = {}
    uind = 0
    pind = 0
    cind = 0
    plines = __salt__['cmd.run'](__grains__['ps'], python_shell=True).splitlines()
    guide = plines.pop(0).split()
    if 'USER' in guide:
        uind = guide.index('USER')
    elif 'UID' in guide:
        uind = guide.index('UID')
    if 'PID' in guide:
        pind = guide.index('PID')
    if 'COMMAND' in guide:
        cind = guide.index('COMMAND')
    elif 'CMD' in guide:
        cind = guide.index('CMD')
    for line in plines:
        if not line:
            continue
        comps = line.split()
        ret[comps[pind]] = {'user': comps[uind],
                            'cmd': ' '.join(comps[cind:])}
    return ret
Return the process data .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' status.procs