Unnamed: 0 (int64): 0 – 389k
code (string): lengths 26 – 79.6k
docstring (string): lengths 1 – 46.9k
385,800
def tree_multiresolution(G, Nlevel, reduction_method='resistance_distance',
                         compute_full_eigen=False, root=None):
    # NOTE: stripped string literals and a few broken index computations
    # below are reconstructed; they are marked where the fix is a guess.
    if not root:
        if hasattr(G, 'root'):
            root = G.root
        else:
            root = 1
    Gs = [G]
    if compute_full_eigen:
        Gs[0].compute_fourier_basis()

    subsampled_vertex_indices = []
    depths, parents = _tree_depths(G.A, root)
    old_W = G.W

    for lev in range(Nlevel):
        # Keep the vertices at even depths of the current tree.
        down_odd = np.round(depths) % 2
        down_even = np.ones(Gs[lev].N) - down_odd
        keep_inds = np.where(down_even == 1)[0]
        subsampled_vertex_indices.append(keep_inds)

        # New indices of the kept non-root vertices and of their new parents
        # (their old grandparents). The index lookups were left unfinished in
        # the extracted source; searchsorted is a reconstruction.
        non_root_keep_inds = np.setdiff1d(keep_inds, root)
        new_non_root_inds = np.searchsorted(keep_inds, non_root_keep_inds)
        old_parents_of_non_root_keep_inds = parents[non_root_keep_inds]
        old_grandparents_of_non_root_keep_inds = parents[old_parents_of_non_root_keep_inds]
        new_non_root_parents = np.searchsorted(keep_inds, old_grandparents_of_non_root_keep_inds)

        old_W_i_inds, old_W_j_inds, old_W_weights = sparse.find(old_W)
        i_inds = np.concatenate((new_non_root_inds, new_non_root_parents))
        j_inds = np.concatenate((new_non_root_parents, new_non_root_inds))
        new_N = int(np.sum(down_even))

        if reduction_method == "unweighted":
            new_weights = np.ones(np.shape(i_inds))
        elif reduction_method == "sum":
            # Edge weights child->parent and parent->grandparent in the old
            # graph (reconstructed lookups; the upstream left them as TODOs).
            old_weights_to_parents = np.asarray(
                old_W[non_root_keep_inds, old_parents_of_non_root_keep_inds]).ravel()
            old_weights_parents_to_grandparents = np.asarray(
                old_W[old_parents_of_non_root_keep_inds,
                      old_grandparents_of_non_root_keep_inds]).ravel()
            new_weights = old_weights_to_parents + old_weights_parents_to_grandparents
            new_weights = np.concatenate((new_weights, new_weights))
        elif reduction_method == "resistance_distance":
            old_weights_to_parents = np.asarray(
                old_W[non_root_keep_inds, old_parents_of_non_root_keep_inds]).ravel()
            old_weights_parents_to_grandparents = np.asarray(
                old_W[old_parents_of_non_root_keep_inds,
                      old_grandparents_of_non_root_keep_inds]).ravel()
            new_weights = 1. / (1. / old_weights_to_parents +
                                1. / old_weights_parents_to_grandparents)
            new_weights = np.concatenate((new_weights, new_weights))
        else:
            raise ValueError('Unknown graph reduction method: {}'.format(reduction_method))

        new_W = sparse.csc_matrix((new_weights, (i_inds, j_inds)),
                                  shape=(new_N, new_N))

        # Update parents and depths for the reduced tree.
        new_root = np.where(keep_inds == root)[0]
        parents = np.zeros(np.shape(keep_inds)[0])
        parents[new_non_root_inds] = new_non_root_parents
        depths = depths[keep_inds] / 2.

        # Store the new tree.
        Gtemp = graphs.Graph(new_W, coords=Gs[lev].coords[keep_inds],
                             limits=G.limits, root=new_root)
        Gs.append(Gtemp)
        if compute_full_eigen:
            Gs[lev + 1].compute_fourier_basis()

        old_W = new_W
        root = new_root

    return Gs, subsampled_vertex_indices
Compute a multiresolution of trees

Parameters
----------
G : Graph
    Graph structure of a tree.
Nlevel : int
    Number of times to downsample and coarsen the tree.
root : int
    The index of the root of the tree. (default = 1)
reduction_method : str
    The graph reduction method (default = 'resistance_distance').
compute_full_eigen : bool
    To also compute the graph Laplacian eigenvalues for every tree in the
    sequence.

Returns
-------
Gs : ndarray
    Ndarray, with each element containing a graph structure representing
    a reduced tree.
subsampled_vertex_indices : ndarray
    Indices of the vertices of the previous tree that are kept for the
    subsequent tree.
385,801
def _convert_or_shorten_month(cls, data):
    short_month = {
        "jan": [str(1), "01", "Jan", "January"],
        "feb": [str(2), "02", "Feb", "February"],
        "mar": [str(3), "03", "Mar", "March"],
        "apr": [str(4), "04", "Apr", "April"],
        "may": [str(5), "05", "May"],
        "jun": [str(6), "06", "Jun", "June"],
        "jul": [str(7), "07", "Jul", "July"],
        "aug": [str(8), "08", "Aug", "August"],
        "sep": [str(9), "09", "Sep", "September"],
        "oct": [str(10), "Oct", "October"],
        "nov": [str(11), "Nov", "November"],
        "dec": [str(12), "Dec", "December"],
    }
    for month in short_month:
        if data in short_month[month]:
            return month
    return data
Convert a given month into our unified format.

:param data: The month to convert or shorten.
:type data: str
:return: The unified month name.
:rtype: str
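A quick doctest-style sketch of the unification (MonthConverter is a hypothetical host class for the classmethod above; the dataset entry only shows the method body):

>>> MonthConverter._convert_or_shorten_month("January")
'jan'
>>> MonthConverter._convert_or_shorten_month("03")
'mar'
>>> MonthConverter._convert_or_shorten_month("not-a-month")
'not-a-month'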
385,802
def rect(self):
    CheckParent(self)
    val = _fitz.Link_rect(self)
    val = Rect(val)
    return val
rect(self) -> PyObject *
385,803
def gausspars(fwhm, nsigma=1.5, ratio=1, theta=0.):
    xsigma = fwhm / FWHM2SIG
    ysigma = ratio * xsigma
    f = nsigma**2 / 2.
    # NOTE: the angle comparisons below are done in degrees; the extracted
    # source compared after converting to radians, which could never match.
    theta_deg = theta % 360
    theta = np.deg2rad(theta)
    cost = np.cos(theta)
    sint = np.sin(theta)
    if ratio == 0:  # 1D Gaussian
        if theta_deg == 0 or theta_deg == 180:
            a = 1 / xsigma**2
            b = 0.0
            c = 0.0
        elif theta_deg == 90:
            a = 0.0
            b = 0.0
            c = 1 / xsigma**2
        else:
            raise ValueError("The position angle of a 1D Gaussian must be 0, 90 or 180.")
        nx = 2 * int(max(2, (xsigma * nsigma * np.abs(cost)))) + 1
        ny = 2 * int(max(2, (xsigma * nsigma * np.abs(sint)))) + 1
    else:
        xsigma2 = xsigma * xsigma
        ysigma2 = ysigma * ysigma
        a = cost**2 / xsigma2 + sint**2 / ysigma2
        b = 2 * cost * sint * (1.0 / xsigma2 - 1.0 / ysigma2)
        c = sint**2 / xsigma2 + cost**2 / ysigma2
        d = b**2 - 4 * a * c  # discriminant; negative for a real ellipse
        nx = 2 * int(2 * max(1, nsigma * math.sqrt(-c / d))) + 1
        ny = 2 * int(2 * max(1, nsigma * math.sqrt(-a / d))) + 1
    return nx, ny, a, b, c, f
height - the amplitude of the gaussian
x0, y0 - center of the gaussian
fwhm - full width at half maximum of the observation
nsigma - cut the gaussian at nsigma
ratio - ratio of xsigma/ysigma
theta - position angle of the major axis measured counter-clockwise from the x axis

Returns dimensions nx and ny of the elliptical kernel as well as the
ellipse parameters a, b, c, and f when defining an ellipse through the
quadratic form: a*(x-x0)^2 + b*(x-x0)*(y-y0) + c*(y-y0)^2 <= 2*f
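A short usage sketch; the FWHM-to-sigma constant FWHM2SIG is assumed here (it is not defined in this entry):

import math
import numpy as np

FWHM2SIG = 2 * math.sqrt(2 * math.log(2))  # ~2.3548, assumed definition

# Kernel dimensions and ellipse coefficients for a round source:
nx, ny, a, b, c, f = gausspars(fwhm=3.0, nsigma=1.5, ratio=1, theta=0.)
# A pixel (x, y) lies inside the kernel ellipse when
# a*(x-x0)**2 + b*(x-x0)*(y-y0) + c*(y-y0)**2 <= 2*f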
385,804
def add_to_emails(self, *emails):
    assert all(isinstance(element, (str, unicode)) for element in emails), emails
    post_parameters = emails
    headers, data = self._requester.requestJsonAndCheck(
        "POST",
        "/user/emails",
        input=post_parameters
    )
:calls: `POST /user/emails <http://developer.github.com/v3/users/emails>`_ :param email: string :rtype: None
385,805
def list_datasets(self, get_global_public):
    appending = ""
    if get_global_public:
        appending = "public"
    url = self.url() + "/resource/{}dataset/".format(appending)
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        # NOTE: the error-message literal is reconstructed.
        raise RemoteDataNotFoundError('Could not find datasets: {}'.format(req.text))
    else:
        return req.json()
Lists datasets in resources. Setting 'get_global_public' to 'True'
will retrieve all public datasets in cloud. 'False' will get user's
public datasets.

Arguments:
    get_global_public (bool): True if user wants all public datasets in
        cloud. False if user wants only their public datasets.

Returns:
    dict: Returns datasets in JSON format
385,806
def run(cl_args, compo_type):
    # NOTE: the cl_args key names and the log message are reconstructed;
    # the extracted source had its string literals stripped.
    cluster, role, env = cl_args['cluster'], cl_args['role'], cl_args['environ']
    topology = cl_args['topology-name']
    spouts_only, bolts_only = cl_args['spout'], cl_args['bolt']
    try:
        components = tracker_access.get_logical_plan(cluster, env, topology, role)
        topo_info = tracker_access.get_topology_info(cluster, env, topology, role)
        table, header = to_table(components, topo_info)
        if spouts_only == bolts_only:
            print(tabulate(table, headers=header))
        elif spouts_only:
            table, header = filter_spouts(table, header)
            print(tabulate(table, headers=header))
        else:
            table, header = filter_bolts(table, header)
            print(tabulate(table, headers=header))
        return True
    except Exception:
        Log.error("Fail to connect to tracker: %s", cl_args["tracker_url"])
        return False
run command
385,807
def find_items(self, q, shape=ID_ONLY, depth=SHALLOW, additional_fields=None,
               order_fields=None, calendar_view=None, page_size=None,
               max_items=None, offset=0):
    # NOTE: stripped log and error-message literals are reconstructed.
    if shape not in SHAPE_CHOICES:
        raise ValueError("'shape' %s must be one of %s" % (shape, SHAPE_CHOICES))
    if depth not in ITEM_TRAVERSAL_CHOICES:
        raise ValueError("'depth' %s must be one of %s" % (depth, ITEM_TRAVERSAL_CHOICES))
    if not self.folders:
        log.debug('Folder list is empty')
        return
    if additional_fields:
        for f in additional_fields:
            self.validate_item_field(field=f)
        for f in additional_fields:
            if f.field.is_complex:
                raise ValueError("find_items() does not support field '%s'. "
                                 "Use fetch() instead" % f.field.name)
    if calendar_view is not None and not isinstance(calendar_view, CalendarView):
        raise ValueError("'calendar_view' %s must be a CalendarView instance" % calendar_view)
    if q.is_empty():
        restriction = None
        query_string = None
    elif q.query_string:
        restriction = None
        query_string = Restriction(q, folders=self.folders, applies_to=Restriction.ITEMS)
    else:
        restriction = Restriction(q, folders=self.folders, applies_to=Restriction.ITEMS)
        query_string = None
    log.debug(
        'Finding items in folders %s for account %s (shape: %s, depth: %s, '
        'additional_fields: %s, restriction: %s)',
        self.folders,
        self.account,
        shape,
        depth,
        additional_fields,
        restriction.q if restriction else None,
    )
    items = FindItem(account=self.account, folders=self.folders, chunk_size=page_size).call(
        additional_fields=additional_fields,
        restriction=restriction,
        order_fields=order_fields,
        shape=shape,
        query_string=query_string,
        depth=depth,
        calendar_view=calendar_view,
        max_items=calendar_view.max_items if calendar_view else max_items,
        offset=offset,
    )
    if shape == ID_ONLY and additional_fields is None:
        for i in items:
            yield i if isinstance(i, Exception) else Item.id_from_xml(i)
    else:
        for i in items:
            if isinstance(i, Exception):
                yield i
            else:
                yield Folder.item_model_from_tag(i.tag).from_xml(elem=i, account=self.account)
Private method to call the FindItem service

:param q: a Q instance containing any restrictions
:param shape: controls whether to return (id, changekey) tuples or Item objects. If additional_fields is non-null, we always return Item objects.
:param depth: controls whether to return soft-deleted items or not.
:param additional_fields: the extra properties we want on the return objects. Default is no properties. Be aware that complex fields can only be fetched with fetch() (i.e. the GetItem service).
:param order_fields: the SortOrder fields, if any
:param calendar_view: a CalendarView instance, if any
:param page_size: the requested number of items per page
:param max_items: the max number of items to return
:param offset: the offset relative to the first item in the item collection
:return: a generator for the returned item IDs or items
385,808
def _to_operator(rep, data, input_dim, output_dim):
    if rep == 'Operator':
        return data
    if rep == 'Stinespring':
        return _stinespring_to_operator(data, input_dim, output_dim)
    if rep != 'Kraus':
        data = _to_kraus(rep, data, input_dim, output_dim)
    return _kraus_to_operator(data, input_dim, output_dim)
Transform a QuantumChannel to the Operator representation.
385,809
def get_redirect_url(self, **kwargs):
    # NOTE: the stripped dict key and format string are reconstructed.
    params = {
        'oauth_token': self.get_request_token().key,
    }
    return '%s?%s' % (self.auth_url, urllib.urlencode(params))
Return the authorization/authentication URL signed with the request token.
385,810
def get_ref_free_exc_info():
    "Free traceback from references to locals() in each frame to avoid circular reference leading to gc.collect() unable to reclaim memory"
    type, val, tb = sys.exc_info()
    traceback.clear_frames(tb)
    return (type, val, tb)
Free traceback from references to locals() in each frame to avoid circular reference leading to gc.collect() unable to reclaim memory
385,811
def read(filename, **kwargs):
    base, ext = os.path.splitext(filename)
    if ext in ('.fits', '.fz'):
        return fitsio.read(filename, **kwargs)
    elif ext in ('.npy',):
        return np.load(filename, **kwargs)
    elif ext in ('.csv',):
        return np.recfromcsv(filename, **kwargs)
    elif ext in ('.txt', '.dat'):
        return np.genfromtxt(filename, **kwargs)
    msg = "Unrecognized file type: %s" % filename
    raise ValueError(msg)
Read a generic input file into a recarray.
Accepted file formats: [.fits,.fz,.npy,.csv,.txt,.dat]

Parameters:
    filename : input file name
    kwargs   : keyword arguments for the reader

Returns:
    recarray : data array
385,812
def update_editor(self):
    object = self.value
    canvas = self.factory.canvas
    if canvas is not None:
        for nodes_name in canvas.node_children:
            node_children = getattr(object, nodes_name)
            self._add_nodes(node_children)
        for edges_name in canvas.edge_children:
            edge_children = getattr(object, edges_name)
            self._add_edges(edge_children)
    self._add_listeners()
Updates the editor when the object trait changes externally to the editor.
385,813
def __solve_overlaps(self, start_time, end_time):
    if end_time is None or start_time is None:
        return
    # NOTE: the SQL strings below are reconstructed; the extracted source
    # had its string literals stripped.
    query = """
        SELECT a.*, b.name, c.name AS category
          FROM facts a
     LEFT JOIN activities b ON b.id = a.activity_id
     LEFT JOIN categories c ON b.category_id = c.id
         WHERE (a.end_time > ? AND a.start_time < ?)
            OR (a.start_time > ? AND a.start_time < ?)
            OR (a.end_time > ? AND a.end_time < ?)
    """
    conflicts = self.fetchall(query, (start_time, end_time,
                                      start_time, end_time,
                                      start_time, end_time))
    for fact in conflicts:
        # fact is fully inside the new interval - leave it alone
        if start_time < fact["start_time"] and end_time > fact["end_time"]:
            continue
        if fact["start_time"] < start_time < fact["end_time"] and \
           fact["start_time"] < end_time < fact["end_time"]:
            # the new fact falls inside an existing one - split it in two
            logger.info("splitting %s" % fact["name"])
            self.execute("UPDATE facts SET end_time=? WHERE id=?",
                         (start_time, fact["id"]))
            fact_name = fact["name"]
            new_fact = Fact(fact["name"],
                            category=fact["category"],
                            description=fact["description"])
            new_fact_id = self.__add_fact(new_fact.serialized_name(),
                                          end_time, fact["end_time"])
            # copy tags to the new fact
            tag_update = """INSERT INTO fact_tags(fact_id, tag_id)
                                 SELECT ?, tag_id FROM fact_tags WHERE fact_id = ?"""
            self.execute(tag_update, (new_fact_id, fact["id"]))
        elif start_time < fact["start_time"] < end_time:
            logger.info("Overlapping start of %s" % fact["name"])
            self.execute("UPDATE facts SET start_time=? WHERE id=?",
                         (end_time, fact["id"]))
        elif start_time < fact["end_time"] < end_time:
            logger.info("Overlapping end of %s" % fact["name"])
            self.execute("UPDATE facts SET end_time=? WHERE id=?",
                         (start_time, fact["id"]))
finds facts that happen in given interval and shifts them to make room for new fact
385,814
def _involuted_reverse(self):
    def inv_is_top(si):
        return (si.stride == 1 and
                self._lower_bound == StridedInterval._modular_add(self._upper_bound, 1, self.bits))

    o = self.copy()
    o._reversed = not o._reversed
    if o.bits == 8:
        # a single byte needs no byte-swapping
        return o.copy()
    if inv_is_top(o):
        si = o.copy()
        return si
    else:
        lb = o._lower_bound
        ub = o._upper_bound
        rounded_bits = ((o.bits + 7) // 8) * 8
        lb_r = []
        ub_r = []
        # split bounds into bytes, least significant first
        for i in xrange(0, rounded_bits, 8):
            if i != 0:
                lb = lb >> 8
                ub = ub >> 8
            lb_r.append(lb & 0xff)
            ub_r.append(ub & 0xff)
        # reassemble the bytes in reversed order
        si_lb = None
        si_ub = None
        for b in lb_r:
            if si_lb is None:
                si_lb = b
            else:
                si_lb <<= 8
                si_lb |= b
        for b in ub_r:
            if si_ub is None:
                si_ub = b
            else:
                si_ub <<= 8
                si_ub |= b
        si = StridedInterval(bits=o.bits,
                             lower_bound=si_lb,
                             upper_bound=si_ub,
                             stride=o._stride,
                             uninitialized=o.uninitialized)
        si._reversed = o._reversed
        # NOTE: the extracted source only returned `si` when o.is_integer was
        # False; returning it unconditionally is a reconstruction (reversing a
        # non-integer interval loses precision but is still handled).
        return si
This method reverses the StridedInterval object for real. Do expect loss of precision for most cases! :return: A new reversed StridedInterval instance
385,815
def get_link_page_text(link_page):
    # NOTE: the dict keys and format string are reconstructed; the extracted
    # source had its string literals stripped.
    text = ''
    for i, link in enumerate(link_page):
        capped_link_text = (link['text'] if len(link['text']) <= 20
                            else link['text'][:19] + '...')
        text += '[{}] {} ({})\n'.format(i, capped_link_text, link['href'])
    return text
Construct the dialog box to display a list of links to the user.
385,816
def delete(self):
    if not self.id:
        return
    if not self._loaded:
        self.reload()
    return self.http_delete(self.id, etag=self.etag)
Deletes the object. Returns without doing anything if the object is new.
385,817
def pretty_format(message):
    skip = {
        TIMESTAMP_FIELD, TASK_UUID_FIELD, TASK_LEVEL_FIELD,
        MESSAGE_TYPE_FIELD, ACTION_TYPE_FIELD, ACTION_STATUS_FIELD}

    def add_field(previous, key, value):
        value = unicode(pprint.pformat(value, width=40)).replace(
            "\\n", "\n ").replace("\\t", "\t")
        return previous + "  %s: %s\n" % (key, value)

    # NOTE: the body between add_field() and the return expression was lost
    # in extraction; the field-accumulation loop below is a reconstruction.
    remaining = ""
    for field in [ACTION_TYPE_FIELD, MESSAGE_TYPE_FIELD, ACTION_STATUS_FIELD]:
        if field in message:
            remaining = add_field(remaining, field, message[field])
    for key, value in sorted(message.items()):
        if key not in skip:
            remaining = add_field(remaining, key, value)

    return "%s -> /%s\n%sZ\n%s" % (
        message[TASK_UUID_FIELD],
        "/".join(str(x) for x in message[TASK_LEVEL_FIELD]),
        datetime.utcfromtimestamp(message[TIMESTAMP_FIELD]).isoformat(
            sep=str(" ")),
        remaining,
    )
Convert a message dictionary into a human-readable string. @param message: Message to parse, as dictionary. @return: Unicode string.
385,818
def delitem_via_sibseqs(ol, *sibseqs):
    pathlist = list(sibseqs)
    this = ol
    # walk down to the parent container of the target element
    for i in range(0, pathlist.__len__() - 1):
        key = pathlist[i]
        this = this.__getitem__(key)
    this.__delitem__(pathlist[-1])
    return ol
from elist.elist import *
y = ['a',['b',["bb"]],'c']
y[1][1]
delitem_via_sibseqs(y,1,1)
y
385,819
def set_task_object(self, task_id, task_progress_object):
    self.set_task(task_id=task_id,
                  total=task_progress_object.total,
                  prefix=task_progress_object.prefix,
                  suffix=task_progress_object.suffix,
                  decimals=task_progress_object.decimals,
                  bar_length=task_progress_object.bar_length,
                  keep_alive=task_progress_object.keep_alive,
                  display_time=task_progress_object.display_time)
Defines a new progress bar with the given information using a TaskProgress object. :param task_id: Unique identifier for this progress bar. Will erase if already existing. :param task_progress_object: TaskProgress object holding the progress bar information.
385,820
def flush(self):
    content = self._buffer.getvalue()
    self._buffer = StringIO()
    if content:
        self._client.send_message(self._target, content, mtype="chat")
Sends buffered data to the target
385,821
def TEST():
    # NOTE: the stripped string literals are reconstructed from the sample
    # output in the docstring ("Mars", "Rover", the preference keys and the
    # factor names).
    w = World('Mars', [0, 0.0, 0.9, 0.0])
    print(w)
    p = Person('Rover', {'tax_min': 0.0, 'tax_max': 0.9,
                         'tradition': 0.9, 'equity': 0.0})
    print(p)
    h = Happiness(p, w)
    h.add_factor(HappinessFactors('tax', 'Economic', 0.1, 0.3))
    h.add_factor(HappinessFactors('tradition', 'Personal', 0.3, 0.9))
    h.add_factor(HappinessFactors('equity', 'Personal', 0.1, 0.9))
    h.add_factor(HappinessFactors('growth', 'Economic', 0.01, 0.09))
    print(h.show_details())
Modules for testing happiness of 'persons' in 'worlds' based on
simplistic preferences. Just a toy - don't take seriously.

----- WORLD SUMMARY for : Mars -----
population = 0
tax_rate = 0.0
tradition = 0.9
equity = 0.0

Preferences for Rover
tax_min = 0.0
equity = 0.0
tax_max = 0.9
tradition = 0.9

Rover is Indifferent in Mars (0)
DETAILS
tax: Economic = 0.1 -> 0.3
tradition: Personal = 0.3 -> 0.9
equity: Personal = 0.1 -> 0.9
growth: Economic = 0.01 -> 0.09
385,822
def base26(x, _alphabet=string.ascii_uppercase):
    result = []
    while x:
        x, digit = divmod(x, 26)
        if not digit:
            # bijective numeration has no zero digit
            x -= 1
            digit = 26
        result.append(_alphabet[digit - 1])
    return ''.join(result[::-1])
Return positive ``int`` ``x`` as string in bijective base26 notation.

>>> [base26(i) for i in [0, 1, 2, 26, 27, 28, 702, 703, 704]]
['', 'A', 'B', 'Z', 'AA', 'AB', 'ZZ', 'AAA', 'AAB']

>>> base26(344799)  # 19 * 26**3 + 16 * 26**2 + 1 * 26**1 + 13 * 26**0
'SPAM'

>>> base26(256)
'IV'
385,823
def get_consensus_hashes(block_heights, hostport=None, proxy=None):
    # NOTE: schema keys and log/error strings are reconstructed; the
    # extracted source had its string literals stripped.
    assert proxy or hostport, 'Need either proxy handle or hostport string'
    if proxy is None:
        proxy = connect_hostport(hostport)

    consensus_hashes_schema = {
        'type': 'object',
        'properties': {
            'consensus_hashes': {
                'type': 'object',
                'patternProperties': {
                    '^[0-9]+$': {
                        'type': 'string',
                        'pattern': OP_CONSENSUS_HASH_PATTERN,
                    },
                },
            },
        },
        'required': [
            'consensus_hashes',
        ],
    }
    resp_schema = json_response_schema(consensus_hashes_schema)

    resp = {}
    try:
        resp = proxy.get_consensus_hashes(block_heights)
        resp = json_validate(resp_schema, resp)
        if json_is_error(resp):
            log.error('Failed to get consensus hashes for {}: {}'.format(
                block_heights, resp['error']))
            return resp
    except ValidationError as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)
        resp = {'error': 'Server response did not match expected schema', 'http_status': 502}
        return resp
    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out', 'http_status': 503}
        return resp
    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed', 'http_status': 502}
        return resp
    except Exception as ee:
        if BLOCKSTACK_DEBUG:
            log.exception(ee)
        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node', 'http_status': 500}
        return resp

    consensus_hashes = resp['consensus_hashes']
    try:
        # coerce the block heights to ints
        ret = {int(k): v for k, v in consensus_hashes.items()}
        log.debug('consensus hashes: {}'.format(ret))
        return ret
    except ValueError:
        return {'error': 'Server returned invalid block heights', 'http_status': 503}
Get consensus hashes for a list of blocks NOTE: returns {block_height (int): consensus_hash (str)} (coerces the key to an int) Returns {'error': ...} on error
385,824
def add(self, dist):
    if self.can_add(dist) and dist.has_version():
        dists = self._distmap.setdefault(dist.key, [])
        if dist not in dists:
            dists.append(dist)
            dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
Add `dist` if we ``can_add()`` it and it has not already been added
385,825
def _CollectTypeChecks(function, parent_type_check_dict, stack_location, self_name):
    type_check_dict = dict(parent_type_check_dict)
    type_check_dict.update(_ParseDocstring(function))
    for key, value in type_check_dict.items():
        if isinstance(value, str):
            type_check_dict[key] = _ParseTypeCheckString(value, stack_location + 1, self_name)
    return type_check_dict
Collect all type checks for this function.
385,826
def add_unique_template_variables(self, options):
    options.update(dict(
        colorProperty=self.color_property,
        colorStops=self.color_stops,
        colorType=self.color_function_type,
        radiusType=self.radius_function_type,
        defaultColor=self.color_default,
        defaultRadius=self.radius_default,
        radiusProperty=self.radius_property,
        radiusStops=self.radius_stops,
        strokeWidth=self.stroke_width,
        strokeColor=self.stroke_color,
        highlightColor=self.highlight_color
    ))
    if self.vector_source:
        options.update(dict(
            vectorColorStops=self.generate_vector_color_map(),
            vectorRadiusStops=self.generate_vector_numeric_map()))
Update map template variables specific to graduated circle visual
385,827
def answers(self, other):
    if other.__class__ == self.__class__:
        return (other.service + 0x40) == self.service or \
            (self.service == 0x7f and self.request_service_id == other.service)
    return False
DEV: true if self is an answer from other
385,828
def init_module(self, run_object):
    # NOTE: the stripped string literals (separator, name template, globals
    # keys) are reconstructed.
    self.profile = self.profile_module
    self._run_object, _, self._run_args = run_object.partition(' ')
    self._object_name = '%s (module)' % self._run_object
    self._globs = {
        '__file__': self._run_object,
        '__name__': '__main__',
        '__package__': None,
    }
    program_path = os.path.dirname(self._run_object)
    if sys.path[0] != program_path:
        sys.path.insert(0, program_path)
    self._replace_sysargs()
Initializes profiler with a module.
385,829
def convert_types(cls, value):
    if isinstance(value, decimal.Decimal):
        return float(value)
    else:
        return value
Takes a value from MSSQL, and converts it to a value that's safe for JSON/Google Cloud Storage/BigQuery.
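A quick sketch of the conversion (illustrative values; the method is assumed to be exposed on a hook class):

import decimal

convert_types(decimal.Decimal('3.14'))  # -> 3.14 (a float, safe for JSON)
convert_types('hello')                  # -> 'hello' (returned unchanged)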
385,830
def query(querystr, connection=None, **connectkwargs):
    if connection is None:
        connection = connect(**connectkwargs)
    cursor = connection.cursor()
    cursor.execute(querystr)
    return cursor.fetchall()
Execute a query of the given SQL database
385,831
def _preprocess_values(self, Y):
    # NOTE: the assertion message is reconstructed.
    Y_prep = Y.copy()
    Y1 = Y[Y.flatten() == 1].size
    Y2 = Y[Y.flatten() == 0].size
    assert Y1 + Y2 == Y.size, 'Binary classification data must be in {0, 1}'
    Y_prep[Y.flatten() == 0] = -1
    return Y_prep
Check if the values of the observations correspond to the values assumed by the likelihood function. ..Note:: Binary classification algorithm works better with classes {-1, 1}
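A standalone sketch of the remapping this performs, using numpy:

import numpy as np

Y = np.array([[1], [0], [0], [1]])
Y_prep = Y.copy()
Y_prep[Y.flatten() == 0] = -1   # {0, 1} -> {-1, 1}
# Y_prep is now [[1], [-1], [-1], [1]]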
385,832
def segments(self):
    seg_list = self._event.chat_message.message_content.segment
    return [ChatMessageSegment.deserialize(seg) for seg in seg_list]
List of :class:`ChatMessageSegment` in message (:class:`list`).
385,833
def get_safe_return_to(request, return_to):
    if (return_to and is_safe_url(url=return_to, host=request.get_host())
            and return_to != request.build_absolute_uri()):
        return return_to
Ensure the user-originating redirection url is safe, i.e. within same scheme://domain:port
385,834
def reduce(fname, reduction_factor):
    # NOTE: the stripped extension, message and tag literals are
    # reconstructed.
    if fname.endswith('.csv'):
        with open(fname) as f:
            line = f.readline()  # peek at the first line for a header
            if csv.Sniffer().has_header(line):
                header = line
                all_lines = f.readlines()
            else:
                header = None
                f.seek(0)
                all_lines = f.readlines()
        lines = general.random_filter(all_lines, reduction_factor)
        shutil.copy(fname, fname + '.bak')
        print('Copied the original file in %s.bak' % fname)
        _save_csv(fname, lines, header)
        print('Extracted %d lines out of %d' % (len(lines), len(all_lines)))
        return
    elif fname.endswith('.npy'):
        array = numpy.load(fname)
        shutil.copy(fname, fname + '.bak')
        print('Copied the original file in %s.bak' % fname)
        arr = numpy.array(general.random_filter(array, reduction_factor))
        numpy.save(fname, arr)
        print('Extracted %d rows out of %d' % (len(arr), len(array)))
        return
    node = nrml.read(fname)
    model = node[0]
    if model.tag.endswith('exposureModel'):
        total = len(model.assets)
        model.assets.nodes = general.random_filter(
            model.assets, reduction_factor)
        num_nodes = len(model.assets)
    elif model.tag.endswith('siteModel'):
        total = len(model)
        model.nodes = general.random_filter(model, reduction_factor)
        num_nodes = len(model)
    elif model.tag.endswith('sourceModel'):
        reduce_source_model(fname, reduction_factor)
        return
    elif model.tag.endswith('logicTree'):
        for smpath in logictree.collect_info(fname).smpaths:
            reduce_source_model(smpath, reduction_factor)
        return
    else:
        raise RuntimeError('Unknown model tag: %s' % model.tag)
    save_bak(fname, node, num_nodes, total)
Produce a submodel from `fname` by sampling the nodes randomly. Supports source models, site models and exposure models. As a special case, it is also able to reduce .csv files by sampling the lines. This is a debugging utility to reduce large computations to small ones.
385,835
def encode_sequence(content, error=None, version=None, mode=None, mask=None,
                    encoding=None, eci=False, boost_error=True,
                    symbol_count=None):
    # NOTE: the error-message strings below are reconstructed; the extracted
    # source had its string literals stripped.
    def one_item_segments(chunk, mode):
        segs = Segments()
        segs.add_segment(make_segment(chunk, mode=mode, encoding=encoding))
        return segs

    def divide_into_chunks(data, num):
        k, m = divmod(len(data), num)
        return [data[i * k + min(i, m):(i + 1) * k + min(i + 1, m)]
                for i in range(num)]

    def calc_qrcode_bit_length(char_count, ver_range, mode, encoding=None,
                               is_eci=False, is_sa=False):
        overhead = 4  # mode indicator
        overhead += consts.CHAR_COUNT_INDICATOR_LENGTH[mode][ver_range]
        if is_eci and mode == consts.MODE_BYTE \
                and encoding != consts.DEFAULT_BYTE_ENCODING:
            overhead += 4  # ECI indicator
            overhead += 8  # ECI assignment number
        if is_sa:
            overhead += 5 * 4  # Structured Append header
        bits = 0
        if mode == consts.MODE_NUMERIC:
            num, remainder = divmod(char_count, 3)
            bits += num * 10 + (4 if remainder == 1 else 7)
        elif mode == consts.MODE_ALPHANUMERIC:
            num, remainder = divmod(char_count, 2)
            bits += num * 11 + (6 if remainder else 0)
        elif mode == consts.MODE_BYTE:
            bits += char_count * 8
        elif mode == consts.MODE_KANJI:
            bits += char_count * 13
        return overhead + bits

    def number_of_symbols_by_version(content, version, error, mode):
        length = len(content)
        ver_range = version_range(version)
        bit_length = calc_qrcode_bit_length(length, ver_range, mode, encoding,
                                            is_eci=eci, is_sa=True)
        capacity = consts.SYMBOL_CAPACITY[version][error]
        cnt = int(math.ceil(bit_length / capacity))
        # account for the Structured Append (and ECI) overhead per extra symbol
        bit_length += 5 * 4 * (cnt - 1) + (12 * (cnt - 1) if eci else 0)
        return int(math.ceil(bit_length / capacity))

    version = normalize_version(version)
    if version is not None:
        if version < 1:
            raise VersionError('This function does not accept Micro QR Code '
                               'versions. Provided: "{0}"'
                               .format(get_version_name(version)))
    elif symbol_count is None:
        raise ValueError('Please provide either a version or a symbol count')
    if symbol_count is not None and not 1 <= symbol_count <= 16:
        raise ValueError('Invalid symbol count, must be in range 1 .. 16')
    error = normalize_errorlevel(error, accept_none=True)
    if error is None:
        error = consts.ERROR_LEVEL_L
    mode = normalize_mode(mode)
    mask = normalize_mask(mask, is_micro=False)
    segments = prepare_data(content, mode, encoding, version)
    guessed_version = None
    if symbol_count is None:
        try:
            guessed_version = find_version(segments, error, eci=eci, micro=False)
        except DataOverflowError:
            pass  # the data does not fit into a single symbol
    if guessed_version and guessed_version <= (version or guessed_version):
        # the data fits into a single symbol
        return [_encode(segments, error=error,
                        version=(version or guessed_version), mask=mask,
                        eci=eci, boost_error=boost_error)]
    if len(segments.modes) > 1:
        raise ValueError('Cannot split multi-mode content into a '
                         'Structured Append sequence')
    mode = segments.modes[0]
    if mode == consts.MODE_NUMERIC:
        content = str(content)
    if symbol_count is not None and len(content) < symbol_count:
        raise ValueError('Cannot split the content into {0} symbols'
                         .format(symbol_count))
    sa_parity_data = calc_structured_append_parity(content)
    num_symbols = symbol_count or 16
    if version is not None:
        num_symbols = number_of_symbols_by_version(content, version, error, mode)
    if num_symbols > 16:
        raise DataOverflowError('The data does not fit into Structured Append '
                                'symbols of version {0}'.format(version))
    chunks = divide_into_chunks(content, num_symbols)
    if symbol_count is not None:
        segments = one_item_segments(max(chunks, key=len), mode)
        version = find_version(segments, error, eci=eci, micro=False, is_sa=True)
    sa_info = partial(_StructuredAppendInfo, total=len(chunks) - 1,
                      parity=sa_parity_data)
    return [_encode(one_item_segments(chunk, mode), error=error,
                    version=version, mask=mask, eci=eci,
                    boost_error=boost_error, sa_info=sa_info(i))
            for i, chunk in enumerate(chunks)]
EXPERIMENTAL: Creates a sequence of QR Codes in Structured Append mode.

:return: Iterable of named tuples, see :py:func:`encode` for details.
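If this is segno's internal encoder, the public wrapper for it is roughly the following (a hedged sketch, assuming the segno package is installed; file names are illustrative):

import segno

# Split the content across up to four QR codes in Structured Append mode
seq = segno.make_sequence('0123456789' * 50, symbol_count=4)
for i, qr in enumerate(seq):
    qr.save('part-{}.png'.format(i))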
385,836
def _do_api_call(self, method, data):
    data.update({
        "key": self.api_key,
    })
    # NOTE: the request/parse step below was lost in extraction and is a
    # reconstruction; `self.api_url` is a hypothetical attribute holding the
    # Petfinder endpoint.
    response = requests.get(self.api_url + method, params=data)
    root = etree.fromstring(response.content)
    status_code = root.find("header/status/code").text
    exc_class = _get_exception_class_from_status_code(status_code)
    if exc_class:
        error_message = root.find("header/status/message").text
        raise exc_class(error_message)
    return root
Convenience method to carry out a standard API call against the
Petfinder API.

:param basestring method: The API method name to call.
:param dict data: Key/value parameters to send to the API method.
    This varies based on the method.
:raises: A number of :py:exc:`petfinder.exceptions.PetfinderAPIError`
    sub-classes, depending on what went wrong.
:rtype: lxml.etree._Element
:returns: The parsed document.
385,837
def patch(module, external=(), internal=()):
    external = tuple(external)
    internal = tuple(internal)

    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            master_mock = mock.MagicMock()

            def get_mock(name):
                return getattr(master_mock, __patch_name(name))

            def patch_external(name):
                return mock.patch(name, get_mock(name))

            def patch_internal(name):
                return mock.patch(module.__name__ + '.' + name, get_mock(name))

            try:
                with __nested(patch_external(n) for n in external):
                    if external:
                        reload_module(module)
                    with __nested(patch_internal(n) for n in internal):
                        return fn(master_mock, *args, **kwargs)
            finally:
                if external:
                    reload_module(module)
        return wrapper
    return decorator
Temporarily monkey-patch dependencies which can be external to, or internal to the supplied module. :param module: Module object :param external: External dependencies to patch (full paths as strings) :param internal: Internal dependencies to patch (short names as strings) :return:
385,838
def activate_left(self, token):
    watchers.MATCHER.debug(
        "Node <%s> activated left with token %r", self, token)
    return self._activate_left(token.copy())
Make a copy of the received token and call `_activate_left`.
385,839
def str_rfind(x, sub, start=0, end=None): return _to_string_sequence(x).find(sub, start, 0 if end is None else end, end is None, False)
Returns the highest indices in each string in a column, where the provided
substring is fully contained within a sample. If the substring is not
found, -1 is returned.

:param str sub: A substring to be found in the samples
:param int start:
:param int end:
:returns: an expression containing the highest indices specifying the start of the substring.

Example:

>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
  #  text
  0  Something
  1  very pretty
  2  is coming
  3  our
  4  way.

>>> df.text.str.rfind(sub="et")
Expression = str_rfind(text, sub='et')
Length: 5 dtype: int64 (expression)
-----------------------------------
0   3
1   7
2  -1
3  -1
4  -1
385,840
def _generate_splits(self, m, r):
    new_rects = []
    if r.left > m.left:
        new_rects.append(Rectangle(m.left, m.bottom, r.left-m.left, m.height))
    if r.right < m.right:
        new_rects.append(Rectangle(r.right, m.bottom, m.right-r.right, m.height))
    if r.top < m.top:
        new_rects.append(Rectangle(m.left, r.top, m.width, m.top-r.top))
    if r.bottom > m.bottom:
        new_rects.append(Rectangle(m.left, m.bottom, m.width, r.bottom-m.bottom))
    return new_rects
When a rectangle is placed inside a maximal rectangle, it stops being one
and up to 4 new maximal rectangles may appear depending on the placement.
_generate_splits calculates them.

Arguments:
    m (Rectangle): max_rect rectangle
    r (Rectangle): rectangle placed

Returns:
    list : list containing new maximal rectangles or an empty list
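A small worked example (assuming a Rectangle(x, y, width, height) constructor with left/right/top/bottom properties, as the code implies; `packer` is a hypothetical MaxRects instance):

# Place a 2x2 rectangle at the bottom-left corner of a 5x5 maximal rectangle:
m = Rectangle(0, 0, 5, 5)
r = Rectangle(0, 0, 2, 2)
splits = packer._generate_splits(m, r)
# Two maximal rectangles remain: the 3x5 strip right of r and the 5x3
# strip above r. They overlap, which is expected in MaxRects.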
385,841
def get_page_labels(self, page_id, prefix=None, start=None, limit=None):
    url = 'rest/api/content/{id}/label'.format(id=page_id)
    params = {}
    if prefix:
        params['prefix'] = prefix
    if start is not None:
        params['start'] = int(start)
    if limit is not None:
        params['limit'] = int(limit)
    return self.get(url, params=params)
Returns the list of labels on a piece of Content.

:param page_id: A string containing the id of the labels content container.
:param prefix: OPTIONAL: The prefixes to filter the labels with {@see Label.Prefix}.
    Default: None.
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of labels to return, this may be
    restricted by fixed system limits. Default: 200.
:return: The JSON data returned from the content/{id}/label endpoint, or the results of
    the callback. Will raise requests.HTTPError on bad input, potentially.
385,842
def extractHolidayWeekendSchedules(self):
    result = namedtuple("result", ["Weekend", "Holiday"])
    result.Weekend = self.m_hldy["Weekend_Schd"][MeterData.StringValue]
    result.Holiday = self.m_hldy["Holiday_Schd"][MeterData.StringValue]
    return result
extract holiday and weekend :class:`~ekmmeters.Schedule` from meter object buffer.

Returns:
    tuple: Holiday and weekend :class:`~ekmmeters.Schedule` values, as strings.

    ======= ======================================
    Holiday :class:`~ekmmeters.Schedule` as string
    Weekend :class:`~ekmmeters.Schedule` as string
    ======= ======================================
385,843
def expose_ancestors_or_children(self, member, collection, lang=None):
    x = {
        "id": member.id,
        "label": str(member.get_label(lang)),
        "model": str(member.model),
        "type": str(member.type),
        "size": member.size,
        "semantic": self.semantic(member, parent=collection)
    }
    if isinstance(member, ResourceCollection):
        x["lang"] = str(member.lang)
    return x
Build an ancestor or descendant dict view based on selected information :param member: Current Member to build for :param collection: Collection from which we retrieved it :param lang: Language to express data in :return:
385,844
def create_service_from_endpoint(endpoint, service_type, title=None,
                                 abstract=None, catalog=None):
    # NOTE: the stripped log messages and the csw_type literal are
    # reconstructed.
    from models import Service
    if Service.objects.filter(url=endpoint, catalog=catalog).count() == 0:
        # check if the endpoint is reachable
        request = requests.get(endpoint)
        if request.status_code == 200:
            LOGGER.debug('Creating a %s service for endpoint=%s catalog=%s'
                         % (service_type, endpoint, catalog))
            service = Service(
                type=service_type, url=endpoint, title=title,
                abstract=abstract, csw_type='service', catalog=catalog
            )
            service.save()
            return service
        else:
            LOGGER.warning('Endpoint is invalid, status code is %s'
                           % request.status_code)
    else:
        LOGGER.warning('A service for endpoint %s in catalog %s already exists'
                       % (endpoint, catalog))
    return None
Create a service from an endpoint if it does not already exists.
385,845
def getKeyword(filename, keyword, default=None, handle=None):
    # NOTE: the stripped string literals ('[', '[0]', the error message and
    # the trailing-character check) are reconstructed.
    if filename.find('[') < 0:
        filename += '[0]'
    _fname, _extn = parseFilename(filename)
    if not handle:
        _fimg = openImage(_fname)
    else:
        if isinstance(handle, fits.HDUList):
            _fimg = handle
        else:
            raise ValueError('Handle must be a %r object!' % fits.HDUList)
    _hdr = getExtn(_fimg, _extn).header
    try:
        value = _hdr[keyword]
    except KeyError:
        _nextn = findKeywordExtn(_fimg, keyword)
        try:
            value = _fimg[_nextn].header[keyword]
        except KeyError:
            value = ''
    if not handle:
        _fimg.close()
        del _fimg
    if value == '':
        if default is None:
            value = None
        else:
            value = default
    elif isinstance(value, string_types):
        # strip a trailing separator, if any
        if value[-1:] == '/':
            value = value[:-1]
    return value
General, write-safe method for returning a keyword value from the header of a IRAF recognized image. Returns the value as a string.
385,846
def generate_join_docs_list(self, left_collection_list, right_collection_list):
    # NOTE: the key-prefix literals ('left.', 'right.') are reconstructed.
    joined_docs = []
    if (len(left_collection_list) != 0) and (len(right_collection_list) != 0):
        for left_doc in left_collection_list:
            for right_doc in right_collection_list:
                l_dict = self.change_dict_keys(left_doc, 'left.')
                r_dict = self.change_dict_keys(right_doc, 'right.')
                joined_docs.append(dict(l_dict, **r_dict))
    elif left_collection_list:
        for left_doc in left_collection_list:
            joined_docs.append(self.change_dict_keys(left_doc, 'left.'))
    else:
        for right_doc in right_collection_list:
            joined_docs.append(self.change_dict_keys(right_doc, 'right.'))
    return joined_docs
Helper function for merge_join_docs

:param left_collection_list: Left Collection to be joined
:type left_collection_list: MongoCollection
:param right_collection_list: Right Collection to be joined
:type right_collection_list: MongoCollection
:return joined_docs: List of docs post join
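A sketch of the intended cross-join behavior (the key-prefixing helper and prefixes are assumptions, as noted in the code):

left = [{'id': 1}, {'id': 2}]
right = [{'name': 'a'}]
# Every left doc is paired with every right doc, with keys disambiguated
# by a prefix, producing something like:
# [{'left.id': 1, 'right.name': 'a'}, {'left.id': 2, 'right.name': 'a'}]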
385,847
def _nextSequence(cls, name=None):
    if not name:
        name = cls._sqlSequence
    if not name:
        # NOTE: the default-name construction was lost in extraction; this
        # follows the docstring's tablename_primarykey_seq convention, and
        # `cls._sqlPrimary` is a hypothetical attribute.
        name = '%s_%s_seq' % (cls._sqlTable, cls._sqlPrimary)
    curs = cls.cursor()
    curs.execute("SELECT nextval('%s')" % name)
    value = curs.fetchone()[0]
    curs.close()
    return value
Return a new sequence number for insertion in self._sqlTable. Note that if your sequences are not named tablename_primarykey_seq (ie. for table 'blapp' with primary key 'john_id', sequence name blapp_john_id_seq) you must give the full sequence name as an optional argument to _nextSequence)
385,848
def get_project(self) -> str:
    with IHCController._mutex:
        if self._project is None:
            if self.client.get_state() != IHCSTATE_READY:
                ready = self.client.wait_for_state_change(IHCSTATE_READY, 10)
                if ready != IHCSTATE_READY:
                    return None
            self._project = self.client.get_project()
    return self._project
Get the ihc project and make sure the controller is ready before fetching it.
385,849
def new(cls, nsptagname, val):
    elm = OxmlElement(nsptagname)
    elm.val = val
    return elm
Return a new ``CT_String`` element with tagname *nsptagname* and ``val`` attribute set to *val*.
385,850
def send(self, smtp=None, **kw):
    smtp_options = {}
    smtp_options.update(self.config.smtp_options)
    if smtp:
        smtp_options.update(smtp)
    return super(Message, self).send(smtp=smtp_options, **kw)
Sends message. :param smtp: When set, parameters from this dictionary overwrite options from config. See `emails.Message.send` for more information. :param kwargs: Parameters for `emails.Message.send` :return: Response objects from emails backend. For default `emails.backend.smtp.STMPBackend` returns an `emails.backend.smtp.SMTPResponse` object.
385,851
def switch_delete_record_for_userid(self, userid):
    with get_network_conn() as conn:
        conn.execute("DELETE FROM switch WHERE userid=?", (userid,))
    LOG.debug("Switch record for user %s is removed from "
              "switch table" % userid)
Remove userid switch record from switch table.
385,852
def repack(self, to_width, *, msb_first, start=0, start_bit=0, length=None):
    # NOTE: the exception messages are reconstructed; the extracted source
    # had its string literals stripped.
    to_width = operator.index(to_width)
    if not isinstance(msb_first, bool):
        raise TypeError('msb_first must be a bool')
    available = self.repack_data_available(
        to_width, start=start, start_bit=start_bit)
    if length is None:
        length = available
    else:
        length = operator.index(length)
        if length > available:
            raise ValueError('not enough data available in the input array')
        if length < 0:
            raise ValueError('length cannot be negative')
    start = operator.index(start)
    start_bit = operator.index(start_bit)
    pos = start
    accum = BinWord(0, 0)
    if start_bit:
        # consume the first word, dropping the skipped bits
        accum = self[pos]
        pos += 1
        rest = accum.width - start_bit
        if msb_first:
            accum = accum.extract(0, rest)
        else:
            accum = accum.extract(start_bit, rest)
    res = BinArray(width=to_width, length=length)
    for idx in range(length):
        while len(accum) < to_width:
            cur = self[pos]
            pos += 1
            if msb_first:
                accum = BinWord.concat(cur, accum)
            else:
                accum = BinWord.concat(accum, cur)
        rest = accum.width - to_width
        if msb_first:
            cur = accum.extract(rest, to_width)
            accum = accum.extract(0, rest)
        else:
            cur = accum.extract(0, to_width)
            accum = accum.extract(to_width, rest)
        res[idx] = cur
    return res
Extracts a part of a BinArray's data and converts it to a BinArray of
a different width.

For the purposes of this conversion, words in this BinArray are joined
side-by-side, starting from a given start index (defaulting to 0),
skipping ``start_bit`` first bits of the first word, then the resulting
stream is split into ``to_width``-sized words and ``length`` first such
words are returned as a new BinArray.

If ``msb_first`` is False, everything proceeds with little endian
ordering: the first word provides the least significant bits of the
combined stream, ``start_bit`` skips bits starting from the LSB, and the
first output word is made from the lowest bits of the combined stream.

Otherwise (``msb_first`` is True), everything proceeds with big endian
ordering: the first word provides the most significant bits of the
combined stream, ``start_bit`` skips bits starting from the MSB, and the
first output word is made from the highest bits of the combined stream.

``start_bits`` must be smaller than the width of the input word. It is
an error to request a larger length than can be provided from the input
array. If ``length`` is not provided, this function returns as many
words as can be extracted.

For example, consider a 10-to-3 repack with start_bit=2, length=4,
msb_first=True:

+---------+-+-+-+-+-+-+-+-+-+-+
|         | MSB    ...     LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
|         |        ...         |
+---------+-+-+-+-+-+-+-+-+-+-+
| start   |X|X|a|b|c|d|e|f|g|h|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |i|j|k|l|X|X|X|X|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
|         |        ...         |
+---------+-+-+-+-+-+-+-+-+-+-+

is repacked to:

+-+-+-+-+
|0|a|b|c|
+-+-+-+-+
|1|d|e|f|
+-+-+-+-+
|2|g|h|i|
+-+-+-+-+
|3|j|k|l|
+-+-+-+-+

The same repack for msb_first=False is performed as follows:

+---------+-+-+-+-+-+-+-+-+-+-+
|         | MSB    ...     LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
|         |        ...         |
+---------+-+-+-+-+-+-+-+-+-+-+
| start   |h|g|f|e|d|c|b|a|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |X|X|X|X|X|X|l|k|j|i|
+---------+-+-+-+-+-+-+-+-+-+-+
|         |        ...         |
+---------+-+-+-+-+-+-+-+-+-+-+

into:

+-+-+-+-+
|0|c|b|a|
+-+-+-+-+
|1|f|e|d|
+-+-+-+-+
|2|i|h|g|
+-+-+-+-+
|3|l|k|j|
+-+-+-+-+
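A short usage sketch; the BinArray constructor signature is an assumption (only repack's signature appears in this entry):

# Hypothetical constructor: a BinArray of two 10-bit words.
words = BinArray([0x2FF, 0x3C0], width=10)
# Repack into four 3-bit words, MSB first, skipping the top two bits of
# the first word, mirroring the docstring's 10-to-3 example shape:
out = words.repack(3, msb_first=True, start_bit=2, length=4)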
385,853
def _escape(self, msg):
    escaped = ''
    for c in msg:
        escaped += c
        if c == '"':
            # double the quote, per the Scratch protocol
            escaped += '"'
    return escaped
Escapes double quotes by adding another double quote as per the Scratch protocol. Expects a string without its delimiting quotes. Returns a new escaped string.
385,854
def timezone(self, tz):
    # NOTE: the dict key was stripped in extraction; 'format' is an
    # assumption based on the log-viewer settings this updates.
    self.data['format'].update(
        timezone=tz,
        time_show_zone=True)
Set timezone on the audit records. Timezone can be in formats: 'US/Eastern', 'PST', 'Europe/Helsinki' See SMC Log Viewer settings for more examples. :param str tz: timezone, i.e. CST
385,855
def send(self, to, from_, body, dm=False):
    tweet = '@{0} {1}'.format(to, body)
    if from_ not in self.accounts:
        raise AccountNotFoundError()
    if len(tweet) > 140:
        raise TweetTooLongError()
    self.auth.set_access_token(*self.accounts.get(from_))
    api = tweepy.API(self.auth)
    if dm:
        api.send_direct_message(screen_name=to, text=body)
    else:
        api.update_status(tweet)
    return
Send BODY as an @message from FROM to TO

If we don't have the access tokens for FROM, raise AccountNotFoundError.
If the tweet resulting from '@{0} {1}'.format(TO, BODY) is > 140 chars,
raise TweetTooLongError.
If we want to send this message as a DM, do so.

Arguments:
- `to`: str
- `from_`: str
- `body`: str
- `dm`: [optional] bool

Return: None

Exceptions: AccountNotFoundError, TweetTooLongError
385,856
def domain_block(self, domain=None):
    params = self.__generate_params(locals())
    self.__api_request('POST', '/api/v1/domain_blocks', params)
Add a block for all statuses originating from the specified domain for the logged-in user.
385,857
def validate_text(value):
    # NOTE: the stripped literals (transform names, rcParams key, default
    # transform) are reconstructed.
    possible_transform = ['axes', 'fig', 'data']
    validate_transform = ValidateInStrings('transform', possible_transform,
                                           True)
    tests = [validate_float, validate_float, validate_str,
             validate_transform, dict]
    if isinstance(value, six.string_types):
        xpos, ypos = rcParams['texts.default_position']
        return [(xpos, ypos, value, 'fig', {'ha': 'right'})]
    elif isinstance(value, tuple):
        value = [value]
    try:
        value = list(value)[:]
    except TypeError:
        raise ValueError("Value must be string or list of tuples!")
    for i, val in enumerate(value):
        try:
            val = tuple(val)
        except TypeError:
            raise ValueError(
                "Text must be an iterable of the form "
                "(x, y, s[, trans, params])!")
        if len(val) < 3:
            raise ValueError(
                "Text tuple must at least be like [x, y, s], with floats x, "
                "y and string s!")
        elif len(val) == 3 or isinstance(val[3], dict):
            val = list(val)
            val.insert(3, 'data')
            if len(val) == 4:
                val += [{}]
            val = tuple(val)
        if len(val) > 5:
            raise ValueError(
                "Text tuple must not be longer then length 5. It can be "
                "like (x, y, s[, trans, params])!")
        value[i] = (validate(x) for validate, x in zip(tests, val))
    return value
Validate a text formatoption

Parameters
----------
value: see :attr:`psyplot.plotter.labelplotter.text`

Raises
------
ValueError
385,858
def interpolate_gridded_scalar(self, x, y, c, order=1, pad=1, offset=0):
    # NOTE: the body of this function was lost in extraction; the following
    # is a plausible reconstruction using scipy.ndimage.map_coordinates.
    # `self.xc`, `self.yc`, `self.dx`, `self.dy` are hypothetical grid
    # attributes (cell centers and spacings).
    from scipy.ndimage import map_coordinates
    import numpy as np
    # pad the scalar so interpolation near the boundaries is defined
    c_padded = np.pad(c, pad, mode='wrap')
    # convert physical points to (fractional) grid indices
    i = (np.asarray(y) - self.yc[0]) / self.dy + pad - offset
    j = (np.asarray(x) - self.xc[0]) / self.dx + pad - offset
    return map_coordinates(c_padded, [i, j], order=order)
Interpolate gridded scalar C to points x,y.

Parameters
----------
x, y : array-like
    Points at which to interpolate
c : array-like
    The scalar, assumed to be defined on the grid.
order : int
    Order of interpolation
pad : int
    Number of pad cells added
offset : int
    ???

Returns
-------
ci : array-like
    The interpolated scalar
385,859
def add_auth_attribute(attr, value, actor=False):
    # NOTE: the reserved-name tuple and everything after retrieving `ca`
    # were lost in extraction; both are reconstructed from the docstring.
    if attr in ('actor', 'anchors', 'is_anonymous', 'not_anonymous',
                'is_authenticated', 'not_authenticated'):
        raise AttributeError("Attribute name %s is reserved by current_auth" % attr)

    ca = current_auth._get_current_object()
    object.__setattr__(ca, attr, value)
    if attr == 'user':
        # `user` is always treated as an actor
        actor = True
    if actor:
        object.__setattr__(ca, 'actor', value)
Helper function for login managers. Adds authorization attributes to
:obj:`current_auth` for the duration of the request.

:param str attr: Name of the attribute
:param value: Value of the attribute
:param bool actor: Whether this attribute is an actor (user or client app
    accessing own data)

If the attribute is an actor and :obj:`current_auth` does not currently
have an actor, the attribute is also made available as
``current_auth.actor``, which in turn is used by
``current_auth.is_authenticated``.

The attribute name ``user`` is special-cased:

1. ``user`` is always treated as an actor
2. ``user`` is also made available as ``_request_ctx_stack.top.user`` for
   compatibility with Flask-Login
385,860
def compliance_report(self, validation_file=None, validation_source=None):
    return validate.compliance_report(
        self, validation_file=validation_file,
        validation_source=validation_source
    )
Return a compliance report. Verify that the device complies with the given validation file and writes a compliance report file. See https://napalm.readthedocs.io/en/latest/validate/index.html. :param validation_file: Path to the file containing compliance definition. Default is None. :param validation_source: Dictionary containing compliance rules. :raise ValidationException: File is not valid. :raise NotImplementedError: Method not implemented.
385,861
def mouseMoveEvent(self, e):
    super(PyInteractiveConsole, self).mouseMoveEvent(e)
    cursor = self.cursorForPosition(e.pos())
    assert isinstance(cursor, QtGui.QTextCursor)
    p = cursor.positionInBlock()
    usd = cursor.block().userData()
    if usd and usd.start_pos_in_block <= p <= usd.end_pos_in_block:
        if QtWidgets.QApplication.overrideCursor() is None:
            QtWidgets.QApplication.setOverrideCursor(
                QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    else:
        if QtWidgets.QApplication.overrideCursor() is not None:
            QtWidgets.QApplication.restoreOverrideCursor()
Extends mouseMoveEvent to display a pointing hand cursor when the mouse cursor is over a file location
385,862
def apply(self, coordinates):
    transform = self.get_transformation(coordinates)
    result = MolecularDistortion(self.affected_atoms, transform)
    result.apply(coordinates)
    return result
Generate, apply and return a random manipulation
385,863
def set_monitor(module):
    def monitor(name, tensor, track_data=True, track_grad=True):
        # NOTE: the stripped dict keys are reconstructed from the
        # parameter names.
        module.monitored_vars[name] = {
            'tensor': tensor,
            'track_data': track_data,
            'track_grad': track_grad,
        }
    module.monitor = monitor
Defines the monitor method on the module.
385,864
def cmsearch_from_file(cm_file_path, seqs, moltype, cutoff=0.0, params=None):
    # NOTE: the parameter-name and key literals are reconstructed; the
    # extracted source had its string literals stripped.
    seqs = SequenceCollection(seqs, MolType=moltype).degap()
    int_map, int_keys = seqs.getIntMap()
    int_map = SequenceCollection(int_map, MolType=moltype)

    app = Cmsearch(InputHandler='_input_as_paths', WorkingDir='/tmp',
                   params=params)
    app.Parameters['--informat'].on('FASTA')
    app.Parameters['-T'].on(cutoff)

    seqs_path = app._input_as_multiline_string(int_map.toFasta())
    paths = [cm_file_path, seqs_path]
    _, tmp_file = mkstemp(dir=app.WorkingDir)
    app.Parameters['--tabfile'].on(tmp_file)

    res = app(paths)
    search_results = list(CmsearchParser(res['SearchResults'].readlines()))
    if search_results:
        # map the integer labels back to the original sequence names
        for i, line in enumerate(search_results):
            label = line[1]
            search_results[i][1] = int_keys.get(label, label)
    res.cleanUp()
    return search_results
Uses cmbuild to build a CM file, then cmsearch to find homologs.

- cm_file_path: path to the file created by cmbuild, containing aligned
  sequences. This will be used to search sequences in seqs.
- seqs: SequenceCollection object or something that can be used to
  construct one, containing unaligned sequences that are to be searched.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- cutoff: bitscore cutoff. No sequences < cutoff will be kept in search
  results. (Default=0.0). Infernal documentation suggests a cutoff of
  log2(number nucleotides searching) will give most likely true homologs.
385,865
def wipe_container(self):
    # NOTE: object_count is a count, not a collection; the extracted source
    # wrapped it in len(), which would fail.
    if self.test_run:
        print("Wipe would delete {0} objects.".format(self.container.object_count))
    else:
        if not self.quiet or self.verbosity > 1:
            print("Deleting {0} objects...".format(self.container.object_count))
        self._connection.delete_all_objects()
Completely wipes out the contents of the container.
385,866
def updateMktDepthL2(self, id, position, marketMaker, operation, side, price, size):
    return _swigibpy.EWrapper_updateMktDepthL2(self, id, position, marketMaker,
                                               operation, side, price, size)
updateMktDepthL2(EWrapper self, TickerId id, int position, IBString marketMaker, int operation, int side, double price, int size)
385,867
def forwards(self, orm):
    "Write your forwards methods here."
    User = orm[user_orm_label]
    try:
        user = User.objects.all()[0]
        for article in orm.Article.objects.all():
            article.author = user
            article.save()
    except IndexError:
        pass
Write your forwards methods here.
385,868
def set_limit(self, param):
    limit = int(param)
    if -2000 <= limit <= 6000:
        self.device.temperature_limit = limit / 10.0
    return ""
Models "Limit Command" functionality of device. Sets the target temperate to be reached. :param param: Target temperature in C, multiplied by 10, as a string. Can be negative. :return: Empty string.
385,869
def set_stepdown_window(self, start, end, enabled=True, scheduled=True, weekly=True):
    # NOTE: the stripped string literals (error messages, URL suffix,
    # strftime format, dict keys) are reconstructed.
    if not start < end:
        raise TypeError('Parameter "start" must occur earlier in time than "end".')
    week_delta = datetime.timedelta(days=7)
    if not ((end - start) <= week_delta):
        raise TypeError('Stepdown windows can not be longer than 1 week in length.')
    url = self._service_url + 'stepdown/'
    data = {
        'start': int(start.strftime('%s')),
        'end': int(end.strftime('%s')),
        'enabled': enabled,
        'scheduled': scheduled,
        'weekly': weekly,
    }
    response = requests.post(
        url,
        data=json.dumps(data),
        **self._instances._default_request_kwargs
    )
    return response.json()
Set the stepdown window for this instance.

Date times are assumed to be UTC, so use UTC date times.

:param datetime.datetime start: The datetime which the stepdown window is to open.
:param datetime.datetime end: The datetime which the stepdown window is to close.
:param bool enabled: A boolean indicating whether or not stepdown is to be enabled.
:param bool scheduled: A boolean indicating whether or not to schedule stepdown.
:param bool weekly: A boolean indicating whether or not to schedule compaction weekly.
385,870
def add_store(name, store, saltenv='base'):
    # NOTE: the stripped dict keys, __salt__ function names and messages are
    # reconstructed.
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}

    cert_file = __salt__['cp.cache_file'](name, saltenv)
    if cert_file is False:
        ret['result'] = False
        ret['comment'] += 'Certificate file not found.'
    else:
        cert_serial = __salt__['certutil.get_cert_serial'](cert_file)
        serials = __salt__['certutil.get_stored_cert_serials'](store)

        if cert_serial not in serials:
            out = __salt__['certutil.add_store'](name, store)
            if "successfully" in out:
                ret['changes']['added'] = name
            else:
                ret['result'] = False
                ret['comment'] += "Failed to store certificate {0}".format(name)
        else:
            ret['comment'] += "{0} already stored.".format(name)
    return ret
Store a certificate to the given store

name
    The certificate to store, this can use local paths or salt:// paths

store
    The store to add the certificate to

saltenv
    The salt environment to use, this is ignored if a local path is specified
385,871
def add_format(self, mimetype, format, requires_context=False):
    self.formats[mimetype] = format
    if not requires_context:
        self.ctxless_mimetypes.append(mimetype)
    self.all_mimetypes.append(mimetype)
Registers a new format to be used in a graph's serialize call If you've installed an rdflib serializer plugin, use this to add it to the content negotiation system Set requires_context=True if this format requires a context-aware graph
385,872
def urlopen(self, method, url, body=None, headers=None, retries=None,
            redirect=True, assert_same_host=True, timeout=_Default,
            pool_timeout=None, release_conn=None, chunked=False,
            body_pos=None, **response_kw):
    if headers is None:
        headers = self.headers
    if not isinstance(retries, Retry):
        retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
    if release_conn is None:
        release_conn = response_kw.get('preload_content', True)
    if assert_same_host and not self.is_same_host(url):
        raise HostChangedError(self, url, retries)

    conn = None
    release_this_conn = release_conn
    response_kw['request_method'] = method

    # NOTE: the connection checkout, request execution and error handling
    # that belong here were lost in extraction; only the redirect/retry tail
    # of the function survives below. `response` and `redirect_location`
    # are produced by the missing section.

    # Handle redirection:
    if redirect_location:
        try:
            retries = retries.increment(method, url, response=response, _pool=self)
        except MaxRetryError:
            if retries.raise_on_redirect:
                drain_and_release_conn(response)
                raise
            return response

        drain_and_release_conn(response)
        retries.sleep_for_retry(response)
        log.debug("Redirecting %s -> %s", url, redirect_location)
        return self.urlopen(
            method, redirect_location, body, headers,
            retries=retries, redirect=redirect,
            assert_same_host=assert_same_host,
            timeout=timeout, pool_timeout=pool_timeout,
            release_conn=release_conn, body_pos=body_pos,
            **response_kw)

    # Check if we should retry the HTTP response.
    has_retry_after = bool(response.getheader('Retry-After'))
    if retries.is_retry(method, response.status, has_retry_after):
        try:
            retries = retries.increment(method, url, response=response, _pool=self)
        except MaxRetryError:
            if retries.raise_on_status:
                drain_and_release_conn(response)
                raise
            return response

        drain_and_release_conn(response)
        retries.sleep(response)
        log.debug("Retry: %s", url)
        return self.urlopen(
            method, url, body, headers,
            retries=retries, redirect=redirect,
            assert_same_host=assert_same_host,
            timeout=timeout, pool_timeout=pool_timeout,
            release_conn=release_conn, body_pos=body_pos,
            **response_kw)

    return response
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.

.. note::

   More commonly, it's appropriate to use a convenience method provided
   by :class:`.RequestMethods`, such as :meth:`request`.

.. note::

   `release_conn` will only behave as expected if `preload_content=False`
   because we want to make `preload_content=False` the default behaviour
   someday soon without breaking backwards compatibility.

:param method:
    HTTP request method (such as GET, POST, PUT, etc.)

:param body:
    Data to send in the request body (useful for creating POST requests,
    see HTTPConnectionPool.post_url for more convenience).

:param headers:
    Dictionary of custom headers to send, such as User-Agent,
    If-None-Match, etc. If None, pool headers are used. If provided,
    these headers completely replace any pool-specific headers.

:param retries:
    Configure the number of retries to allow before raising a
    :class:`~urllib3.exceptions.MaxRetryError` exception.

    Pass ``None`` to retry until you receive a response. Pass a
    :class:`~urllib3.util.retry.Retry` object for fine-grained control
    over different types of retries. Pass an integer number to retry
    connection errors that many times, but no other types of errors.
    Pass zero to never retry.

    If ``False``, then retries are disabled and any exception is raised
    immediately. Also, instead of raising a MaxRetryError on redirects,
    the redirect response will be returned.

:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

:param redirect:
    If True, automatically handle redirects (status codes 301, 302,
    303, 307, 308). Each redirect counts as a retry. Disabling retries
    will disable redirect, too.

:param assert_same_host:
    If ``True``, will make sure that the host of the pool requests is
    consistent else will raise HostChangedError. When False, you can
    use the pool on an HTTP proxy and request foreign hosts.

:param timeout:
    If specified, overrides the default timeout for this one request.
    It may be a float (in seconds) or an instance of
    :class:`urllib3.util.Timeout`.

:param pool_timeout:
    If set and the pool is set to block=True, then this method will
    block for ``pool_timeout`` seconds and raise EmptyPoolError if no
    connection is available within the time period.

:param release_conn:
    If False, then the urlopen call will not release the connection
    back into the pool once a response is received (but will release if
    you read the entire contents of the response such as when
    `preload_content=True`). This is useful if you're not preloading
    the response's content immediately. You will need to call
    ``r.release_conn()`` on the response ``r`` to return the connection
    back into the pool. If None, it takes the value of
    ``response_kw.get('preload_content', True)``.

:param chunked:
    If True, urllib3 will send the body using chunked transfer
    encoding. Otherwise, urllib3 will send the body using the standard
    content-length form. Defaults to False.

:param int body_pos:
    Position to seek to in file-like body in the event of a retry or
    redirect. Typically this won't need to be set because urllib3 will
    auto-populate the value when needed.

:param \\**response_kw:
    Additional parameters are passed to
    :meth:`urllib3.response.HTTPResponse.from_httplib`
385,873
def create_permissions_from_tuples(model, codename_tpls):
    if codename_tpls:
        model_cls = django_apps.get_model(model)
        content_type = ContentType.objects.get_for_model(model_cls)
        for codename_tpl in codename_tpls:
            app_label, codename, name = get_from_codename_tuple(
                codename_tpl, model_cls._meta.app_label
            )
            try:
                Permission.objects.get(codename=codename, content_type=content_type)
            except ObjectDoesNotExist:
                Permission.objects.create(
                    name=name, codename=codename, content_type=content_type
                )
            verify_codename_exists(f"{app_label}.{codename}")
Creates custom permissions on model "model".
385,874
def auth(self):
    # NOTE: the decode argument and exception message are reconstructed.
    self.send(nsq.auth(self.auth_secret))
    frame, data = self.read_response()
    if frame == nsq.FRAME_TYPE_ERROR:
        raise data
    try:
        response = json.loads(data.decode('utf-8'))
    except ValueError:
        self.close_stream()
        raise errors.NSQException(
            'failed to parse AUTH response JSON from nsqd: {!r}'.format(data))
    self.on_auth.send(self, response=response)
    return response
Send authorization secret to nsqd.
385,875
def __replace_adjective(sentence, counts):
    if sentence is not None:
        while sentence.find('#ADJECTIVE') != -1:
            sentence = sentence.replace('#ADJECTIVE',
                                        str(__get_adjective(counts)), 1)
            if sentence.find('#ADJECTIVE') == -1:
                return sentence
        return sentence
    else:
        return sentence
Let's find and replace all instances of #ADJECTIVE.

:param sentence:
:param counts:
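A quick illustration of the placeholder replacement (the replacement words come from the hypothetical __get_adjective helper, so the output is illustrative):

sentence = "The #ADJECTIVE fox met a #ADJECTIVE dog."
# Each '#ADJECTIVE' is replaced one at a time, left to right, giving
# something like: "The quick fox met a lazy dog."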
385,876
def _maybe_trim_strings(self, array, **keys):
    trim_strings = keys.get('trim_strings', False)
    if self.trim_strings or trim_strings:
        _trim_strings(array)
if requested, trim trailing white space from all string fields in the input array
385,877
def update_next_block(self):
    last = self.mem[-1]
    # the instruction names are reconstructed from the docstring (JP, JR, RET)
    if last.inst not in ('ret', 'jp', 'jr') or last.condition_flag is not None:
        return
    if last.inst == 'ret':
        if self.next is not None:
            self.next.delete_from(self)
            self.delete_goes(self.next)
        return
    if last.opers[0] not in LABELS.keys():
        __DEBUG__("INFO: %s is not defined. No optimization is done." % last.opers[0], 2)
        LABELS[last.opers[0]] = LabelInfo(last.opers[0], 0, DummyBasicBlock(ALL_REGS, ALL_REGS))
    n_block = LABELS[last.opers[0]].basic_block
    if self.next is n_block:
        return
    if self.next.prev == self:
        self.next.delete_from(self)
        self.delete_goes(self.next)
    self.next = n_block
    self.next.add_comes_from(self)
    self.add_goes_to(self.next)
If the last instruction of this block is a JP, JR or RET (with no conditions) then the next and goes_to sets just contain a single block
385,878
async def fetchmany(self, size: int = None) -> Iterable[sqlite3.Row]: args = () if size is not None: args = (size,) return await self._execute(self._cursor.fetchmany, *args)
Fetch up to ``size`` rows; if ``size`` is not given, fetch up to ``cursor.arraysize`` rows.
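A short sketch of exercising fetchmany with an aiosqlite-style connection; the table and rows are invented for illustration:

import asyncio
import aiosqlite

async def main():
    async with aiosqlite.connect(':memory:') as db:
        await db.execute('CREATE TABLE t (x INTEGER)')
        await db.executemany('INSERT INTO t VALUES (?)', [(i,) for i in range(10)])
        cursor = await db.execute('SELECT x FROM t')
        rows = await cursor.fetchmany(3)   # at most 3 rows
        print([row[0] for row in rows])    # [0, 1, 2]

asyncio.run(main())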
385,879
def load_dataset(data_name):
    # The two dataset names below are assumptions: they stand in for the
    # corpora that ship without a separate test split.
    if data_name == 'MR' or data_name == 'Subj':
        train_dataset, output_size = _load_file(data_name)
        vocab, max_len = _build_vocab(data_name, train_dataset, [])
        train_dataset, train_data_lengths = _preprocess_dataset(train_dataset, vocab, max_len)
        return vocab, max_len, output_size, train_dataset, train_data_lengths
    else:
        train_dataset, test_dataset, output_size = _load_file(data_name)
        vocab, max_len = _build_vocab(data_name, train_dataset, test_dataset)
        train_dataset, train_data_lengths = _preprocess_dataset(train_dataset, vocab, max_len)
        test_dataset, test_data_lengths = _preprocess_dataset(test_dataset, vocab, max_len)
        return vocab, max_len, output_size, train_dataset, train_data_lengths, test_dataset, \
            test_data_lengths
Load sentiment dataset.
385,880
def natural_ipv4_netmask(ip, fmt='prefixlen'):
    bits = _ipv4_to_bits(ip)

    # Classful addressing: class C (leading '11') -> /24,
    # class B (leading '1') -> /16, class A (leading '0') -> /8.
    if bits.startswith('11'):
        mask = '24'
    elif bits.startswith('1'):
        mask = '16'
    else:
        mask = '8'

    if fmt == 'netmask':
        return cidr_to_ipv4_netmask(mask)
    else:
        return '/' + mask
Returns the "natural" mask of an IPv4 address
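For illustration, the classful rule maps addresses like this (expected values, assuming the helper functions behave as their names suggest):

# Expected results under classful rules (illustrative):
#   natural_ipv4_netmask('10.0.0.1')                    -> '/8'    (class A)
#   natural_ipv4_netmask('172.16.0.1')                  -> '/16'   (class B)
#   natural_ipv4_netmask('192.168.0.1')                 -> '/24'   (class C)
#   natural_ipv4_netmask('192.168.0.1', fmt='netmask')  -> '255.255.255.0'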
385,881
def _record(self): return struct.pack(self.FMT, 1, self.platform_id, 0, self.id_string, self.checksum, 0x55, 0xaa)
An internal method to generate a string representing this El Torito Validation Entry. Parameters: None. Returns: String representing this El Torito Validation Entry.
385,882
def compare_tags(self, tags): all_tags = [] for task in self._tasks: all_tags.extend(task.tags) all_tags_set = set(all_tags) tags_set = set(tags) matched_tags = all_tags_set & tags_set unmatched_tags = all_tags_set - tags_set return matched_tags, unmatched_tags
given a list of tags that the user has specified, return two lists: matched_tags: tags were found within the current play and match those given by the user unmatched_tags: tags that were found within the current play but do not match any provided by the user
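A minimal sketch with stand-in task and play objects so the method can be exercised; the real classes are assumed to expose a .tags list and a _tasks list, as the method body implies:

class _Task:
    def __init__(self, tags):
        self.tags = tags

class _Play:
    compare_tags = compare_tags  # reuse the function above as a method
    def __init__(self, tasks):
        self._tasks = tasks

play = _Play([_Task(['setup']), _Task(['deploy', 'debug'])])
matched, unmatched = play.compare_tags(['deploy', 'extra'])
print(matched)    # {'deploy'}
print(unmatched)  # {'setup', 'debug'}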
385,883
def get_inline_instances(self, request, *args, **kwargs): inlines = super(PlaceholderEditorAdmin, self).get_inline_instances(request, *args, **kwargs) extra_inline_instances = [] inlinetypes = self.get_extra_inlines() for InlineType in inlinetypes: inline_instance = InlineType(self.model, self.admin_site) extra_inline_instances.append(inline_instance) return extra_inline_instances + inlines
Create the inlines for the admin, including the placeholder and contentitem inlines.
385,884
def _plot_colorbar(mappable, fig, subplot_spec, max_cbar_height=4): width, height = fig.get_size_inches() if height > max_cbar_height: axs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=subplot_spec, height_ratios=[height - max_cbar_height, max_cbar_height]) heatmap_cbar_ax = fig.add_subplot(axs2[1]) else: heatmap_cbar_ax = fig.add_subplot(subplot_spec) pl.colorbar(mappable, cax=heatmap_cbar_ax) return heatmap_cbar_ax
Plots a vertical color bar based on mappable. The height of the colorbar is min(figure-height, max_cbar_height).

Parameters
----------
mappable : The image to which the colorbar applies.
fig : The figure object.
subplot_spec : The gridspec subplot, e.g. ``axs[1, 2]``.
max_cbar_height : `float` The maximum colorbar height.

Returns
-------
color bar ax
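A hedged sketch of a call, assuming matplotlib.pyplot is imported as pl (as the function body suggests); the figure layout and data are invented:

import matplotlib.pyplot as pl
from matplotlib import gridspec
import numpy as np

fig = pl.figure(figsize=(6, 8))
gs = gridspec.GridSpec(1, 2, figure=fig, width_ratios=[20, 1])
ax = fig.add_subplot(gs[0])
img = ax.imshow(np.random.rand(10, 10))
cbar_ax = _plot_colorbar(img, fig, gs[1])  # colorbar height capped at 4 inches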
385,885
def add_transaction_clause(self, clause):
    if not isinstance(clause, TransactionClause):
        # Message text reconstructed; the original string literal was lost.
        raise StatementException('only instances of TransactionClause can be added to statements')
    clause.set_context_id(self.context_counter)
    self.context_counter += clause.get_context_size()
    self.transactions.append(clause)
Adds an iff clause to this statement
:param clause: The clause that will be added to the iff statement
:type clause: TransactionClause
385,886
def create_parser() -> FileAwareParser:
    parser = FileAwareParser(description="Clear data from FHIR observation fact table",
                             prog="removefacts", use_defaults=False)
    parser.add_argument("-ss", "--sourcesystem", metavar="SOURCE SYSTEM CODE",
                        help="Sourcesystem code")
    # nargs value assumed: the help text implies one or more upload ids.
    parser.add_argument("-u", "--uploadid", metavar="UPLOAD IDENTIFIER",
                        help="Upload identifier -- uniquely identifies this batch",
                        type=int, nargs='*')
    add_connection_args(parser, strong_config_file=False)
    parser.add_argument("-p", "--testprefix", metavar="SS PREFIX",
                        help=f"Sourcesystem_cd prefix for test suite functions "
                             f"(Default: {default_test_prefix})")
    parser.add_argument("--testlist", help="List leftover test suite entries",
                        action="store_true")
    parser.add_argument("--removetestlist", help="Remove leftover test suite entries",
                        action="store_true")
    return parser
Create a command line parser :return: parser
385,887
def write_extra_data(self, stream: WriteStream) -> None: if self.params: stream.align(8) if self._params_offset_writer: self._params_offset_writer.write_current_offset(stream) else: self._params_offset = stream.tell() self.params.write(stream) if self.actions: stream.align(8) if self._actions_offset_writer: self._actions_offset_writer.write_current_offset(stream) else: self._actions_offset = stream.tell() for s in self.actions: stream.write_string_ref(s.v) if self.queries: stream.align(8) if self._queries_offset_writer: self._queries_offset_writer.write_current_offset(stream) else: self._queries_offset = stream.tell() for s in self.queries: stream.write_string_ref(s.v)
Writes the param container and string pointer arrays. Unlike other write_extra_data functions, this can be called before write().
385,888
def resolve_imports(self, imports, import_depth, parser=None):
    if imports and import_depth:
        for i in list(self.imports):
            try:
                # URL scheme prefixes assumed; the original literals were lost.
                if os.path.exists(i) or i.startswith(('http', 'ftp')):
                    self.merge(Ontology(i, import_depth=import_depth - 1, parser=parser))
                else:  # fall back to a path relative to this ontology file
                    self.merge(Ontology(
                        os.path.join(os.path.dirname(self.path), i),
                        import_depth=import_depth - 1, parser=parser))
            except (IOError, OSError, URLError, HTTPError, _etree.ParseError) as e:
                warnings.warn("{} occurred during import of "
                              "{}".format(type(e).__name__, i),
                              ProntoWarning)
Import required ontologies.
385,889
def MGMT_ED_SCAN(self, sAddr, xCommissionerSessionId, listChannelMask, xCount, xPeriod, xScanDuration):
    # The string literals below were lost from the source; they are
    # reconstructed from context and may differ from the original.
    print '%s call MGMT_ED_SCAN' % self.port
    channelMask = ''
    channelMask = '0x' + self.__convertLongToString(self.__convertChannelMask(listChannelMask))
    try:
        cmd = 'commissioner energy %s %s %s %s %s' % (channelMask, xCount, xPeriod, xScanDuration, sAddr)
        print cmd
        return self.__sendCommand(cmd) == 'Done'
    except Exception, e:
        ModuleHelper.writeintodebuglogger("MGMT_ED_SCAN() error: " + str(e))
send MGMT_ED_SCAN message to a given destination.

Args:
    sAddr: IPv6 destination address for this message
    xCommissionerSessionId: commissioner session id
    listChannelMask: a channel array to indicate which channels to be scanned
    xCount: number of IEEE 802.15.4 ED Scans (milliseconds)
    xPeriod: period between successive IEEE 802.15.4 ED Scans (milliseconds)
    xScanDuration: IEEE 802.15.4 ScanDuration to use when performing an IEEE 802.15.4 ED Scan (milliseconds)

Returns:
    True: successful to send MGMT_ED_SCAN message.
    False: fail to send MGMT_ED_SCAN message
385,890
def create_job(db, datadir):
    calc_id = get_calc_id(db, datadir) + 1
    # String literals reconstructed: the placeholder values and the SQL
    # template below are assumptions, not recovered from the source.
    job = dict(id=calc_id, is_running=1, description='just created',
               user_name='openquake', calculation_mode='to be set',
               ds_calc_dir=os.path.join('%s/calc_%s' % (datadir, calc_id)))
    return db('INSERT INTO job (?S) VALUES (?X)',
              job.keys(), job.values()).lastrowid
Create job for the given user, return it. :param db: a :class:`openquake.server.dbapi.Db` instance :param datadir: Data directory of the user who owns/started this job. :returns: the job ID
385,891
def devices(self): context = Context() existing_devices = context.list_devices(subsystem="hidraw") future_devices = self._get_future_devices(context) for hidraw_device in itertools.chain(existing_devices, future_devices): hid_device = hidraw_device.parent if hid_device.subsystem != "hid": continue cls = HID_DEVICES.get(hid_device.get("HID_NAME")) if not cls: continue for child in hid_device.parent.children: event_device = child.get("DEVNAME", "") if event_device.startswith("/dev/input/event"): break else: continue try: device_addr = hid_device.get("HID_UNIQ", "").upper() if device_addr: device_name = "{0} {1}".format(device_addr, hidraw_device.sys_name) else: device_name = hidraw_device.sys_name yield cls(name=device_name, addr=device_addr, type=cls.__type__, hidraw_device=hidraw_device.device_node, event_device=event_device) except DeviceError as err: self.logger.error("Unable to open DS4 device: {0}", err)
Wait for new DS4 devices to appear.
385,892
def cut_mechanisms(self): for mechanism in utils.powerset(self.node_indices, nonempty=True): micro_mechanism = self.macro2micro(mechanism) if self.cut.splits_mechanism(micro_mechanism): yield mechanism
The mechanisms of this system that are currently cut. Note that although ``cut_indices`` returns micro indices, this returns macro mechanisms. Yields: tuple[int]
385,893
def resource_filename(package_or_requirement, resource_name): if pkg_resources.resource_exists(package_or_requirement, resource_name): return pkg_resources.resource_filename(package_or_requirement, resource_name) path = _search_in_share_folders(package_or_requirement, resource_name) if path: return path raise RuntimeError("Resource {} not found in {}".format(package_or_requirement, resource_name))
Similar to pkg_resources.resource_filename but if the resource it not found via pkg_resources it also looks in a predefined list of paths in order to find the resource :param package_or_requirement: the module in which the resource resides :param resource_name: the name of the resource :return: the path to the resource :rtype: str
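Usage is a drop-in for pkg_resources.resource_filename; the package and resource names here are invented:

# Invented names -- for illustration only.
path = resource_filename('mypackage', 'data/config.yaml')
print(path)  # absolute path, found via pkg_resources or the share-folder fallback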
385,894
def build_model(self):
    # Config keys below are assumptions; the original subscript literals were lost.
    if self.model_config['red']:
        return self.build_red()
    elif self.model_config['hred']:
        return self.build_hred()  # typo fixed: source read "buidl_hred"
    else:
        raise Error("Unrecognized model type {}".format(self.model_config['model_type']))
Find out the type of model configured and dispatch the request to the appropriate method
385,895
def check_config(conf):
    # Key names are recovered from the error messages below.
    if 'fmode' in conf and not isinstance(conf['fmode'], string_types):
        raise TypeError(TAG + ": `fmode` must be a string")
    if 'dmode' in conf and not isinstance(conf['dmode'], string_types):
        raise TypeError(TAG + ": `dmode` must be a string")
    if 'depth' in conf:
        if not isinstance(conf['depth'], int):
            raise TypeError(TAG + ": `depth` must be an int")
        if conf['depth'] < 0:
            raise ValueError(TAG + ": `depth` must be a positive number")
    if 'hash_alg' in conf:
        if not isinstance(conf['hash_alg'], string_types):
            raise TypeError(TAG + ": `hash_alg` must be a string")
        if conf['hash_alg'] not in ACCEPTED_HASH_ALG:
            raise ValueError(TAG + ": `hash_alg` must be one of " + str(ACCEPTED_HASH_ALG))
Type and boundary check
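A config that would pass these checks, assuming ACCEPTED_HASH_ALG contains 'sha256' (all values are illustrative):

conf = {
    'fmode': '0644',       # file mode, as a string
    'dmode': '0755',       # directory mode, as a string
    'depth': 2,            # non-negative int
    'hash_alg': 'sha256',  # must be in ACCEPTED_HASH_ALG
}
check_config(conf)  # returns without raising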
385,896
def Authenticate(self, app_id, challenge, registered_keys):
    client_data = model.ClientData(model.ClientData.TYP_AUTHENTICATION, challenge, self.origin)
    app_param = self.InternalSHA256(app_id)
    challenge_param = self.InternalSHA256(client_data.GetJson())
    num_invalid_keys = 0
    for key in registered_keys:
        try:
            # Version string assumed: only U2F_V2 keys are handled.
            if key.version != u'U2F_V2':
                continue
            for _ in range(30):
                try:
                    resp = self.security_key.CmdAuthenticate(challenge_param, app_param, key.key_handle)
                    return model.SignResponse(key.key_handle, resp, client_data)
                except errors.TUPRequiredError:
                    self.security_key.CmdWink()
                    time.sleep(0.5)
        except errors.InvalidKeyHandleError:
            num_invalid_keys += 1
            continue
        except errors.HardwareError as e:
            raise errors.U2FError(errors.U2FError.BAD_REQUEST, e)

    if num_invalid_keys == len(registered_keys):
        raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)

    raise errors.U2FError(errors.U2FError.TIMEOUT)
Authenticates app_id with the security key. Executes the U2F authentication/signature flow with the security key. Args: app_id: The app_id to register the security key against. challenge: Server challenge passed to the security key as a bytes object. registered_keys: List of keys already registered for this app_id+user. Returns: SignResponse with client_data, key_handle, and signature_data. The client data is an object, while the signature_data is encoded in FIDO U2F binary format. Raises: U2FError: There was some kind of problem with authentication (e.g. there was a timeout while waiting for the test of user presence.)
385,897
def exists(self, uri): try: urllib.request.urlopen(uri) return True except urllib.error.HTTPError: return False
Method returns true if the entity exists in the Repository, false otherwise.

Args:
    uri(str): Entity URI

Returns:
    bool
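Usage is direct; note that only HTTPError is caught, so network-level failures (urllib.error.URLError) would still propagate to the caller. A hedged sketch with an invented repository object and URI:

repo = Repository()  # hypothetical instance exposing exists()
if repo.exists('http://example.org/rest/objects/1'):
    print('entity present in the repository')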
385,898
def get_parent_families(self, family_id): if self._catalog_session is not None: return self._catalog_session.get_parent_catalogs(catalog_id=family_id) return FamilyLookupSession( self._proxy, self._runtime).get_families_by_ids( list(self.get_parent_family_ids(family_id)))
Gets the parent families of the given ``id``.

arg:    family_id (osid.id.Id): the ``Id`` of the ``Family`` to query
return: (osid.relationship.FamilyList) - the parent families of the ``id``
raise:  NotFound - a ``Family`` identified by ``Id`` is not found
raise:  NullArgument - ``family_id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
385,899
def is_valid(edtf_candidate):
    if (
        isLevel0(edtf_candidate)
        or isLevel1(edtf_candidate)
        or isLevel2(edtf_candidate)
    ):
        # '/' is the EDTF interval separator.
        if '/' in edtf_candidate:
            return is_valid_interval(edtf_candidate)
        else:
            return True
    else:
        return False
is_valid takes a candidate EDTF date string and returns whether it is valid.
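Some candidate strings and the outcomes one would expect, assuming the level checkers implement the EDTF spec:

print(is_valid('1984'))        # True  -- simple level 0 date
print(is_valid('1964/2008'))   # True  -- '/' marks an interval, so is_valid_interval runs
print(is_valid('2001-21'))     # True  -- Spring 2001, if the level 1 checker supports seasons
print(is_valid('not-a-date'))  # False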