Columns:
  Unnamed: 0: int64, values 0 to 389k
  code: string, lengths 26 to 79.6k characters
  docstring: string, lengths 1 to 46.9k characters
377,400
def parseService(self, yadis_url, uri, type_uris, service_element):
    self.type_uris = type_uris
    self.server_url = uri
    self.used_yadis = True
    if not self.isOPIdentifier():
        self.local_id = findOPLocalIdentifier(service_element, self.type_uris)
        self.claimed_id = yadis_url
Set the state of this object based on the contents of the service element.
377,401
def set_subparsers_args(self, *args, **kwargs):
    self.subparsers_args = args
    self.subparsers_kwargs = kwargs
Sets the args and kwargs that are passed when creating a subparsers group in an argparse.ArgumentParser, i.e. when calling argparse.ArgumentParser.add_subparsers.
377,402
def add_thermodynamic(self, em=1000):
    internal = set(r for r in self._model.reactions
                   if not self._model.is_exchange(r))
    v = self._v
    alpha = self._prob.namespace(internal, types=lp.VariableType.Binary)
    dmu = self._prob.namespace(internal)

    for reaction_id in self._model.reactions:
        if not self._model.is_exchange(reaction_id):
            flux = v(reaction_id)
            alpha_r = alpha(reaction_id)
            dmu_r = dmu(reaction_id)
            lower, upper = self._model.limits[reaction_id]
            self._prob.add_linear_constraints(
                flux >= lower * (1 - alpha_r),
                flux <= upper * alpha_r,
                dmu_r >= -em * alpha_r + (1 - alpha_r),
                dmu_r <= em * (1 - alpha_r) - alpha_r)

    mu = self._prob.namespace(self._model.compounds)
    tdbalance_lhs = {reaction_id: 0 for reaction_id in self._model.reactions}
    for spec, value in iteritems(self._model.matrix):
        compound, reaction_id = spec
        if not self._model.is_exchange(reaction_id):
            tdbalance_lhs[reaction_id] += mu(compound) * value
    for reaction_id, lhs in iteritems(tdbalance_lhs):
        if not self._model.is_exchange(reaction_id):
            self._prob.add_linear_constraints(lhs == dmu(reaction_id))
Apply thermodynamic constraints to the model. Adding these constraints restricts the solution space to only contain solutions that have no internal loops [Schilling00]_. This is solved as a MILP problem as described in [Muller13]_. The time to solve a problem with thermodynamic constraints is usually much longer than a normal FBA problem. The ``em`` parameter is the upper bound on the delta mu reaction variables. This parameter has to be balanced based on the model size since setting the value too low can result in the correct solutions being infeasible and setting the value too high can result in numerical instability which again makes the correct solutions infeasible. The default value should work in all cases as long as the model is not unusually large.
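Written out, the constraints the code adds for each internal reaction r, with binary indicator alpha_r, flux bounds l_r and u_r, and the ``em`` bound E, take the following form (this is a reading of the code above, not a quote from the cited papers):

\[
l_r\,(1-\alpha_r) \le v_r \le u_r\,\alpha_r, \qquad
-E\,\alpha_r + (1-\alpha_r) \le \Delta\mu_r \le E\,(1-\alpha_r) - \alpha_r, \qquad
\sum_c S_{c,r}\,\mu_c = \Delta\mu_r
\]

so an active reaction (alpha_r = 1) is forced to carry Delta mu_r <= -1, which is what rules out closed internal loops.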
377,403
async def async_get_current_program(channel, no_cache=False):
    chan = await async_determine_channel(channel)
    guide = await async_get_program_guide(chan, no_cache)
    if not guide:
        _LOGGER.warning('Could not get program guide for channel %s', channel)
        return
    now = datetime.datetime.now()
    for prog in guide:
        start = prog.get('start')
        end = prog.get('end')
        if now > start and now < end:
            return prog
Get the current program info
377,404
def setItemPolicy(self, item, policy):
    index = item._combobox_indices[self.ColAction].get(policy, 0)
    self._updateItemComboBoxIndex(item, self.ColAction, index)
    combobox = self.itemWidget(item, self.ColAction)
    if combobox:
        combobox.setCurrentIndex(index)
Sets the policy of the given item
377,405
def get_users(profile='grafana'):
    if isinstance(profile, string_types):
        profile = __salt__['config.option'](profile)
    response = requests.get(
        '{0}/api/users'.format(profile['grafana_url']),
        auth=_get_auth(profile),
        headers=_get_headers(profile),
        timeout=profile.get('grafana_timeout', 3),
    )
    if response.status_code >= 400:
        response.raise_for_status()
    return response.json()
List all users. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_users
377,406
def make_qr(content, error=None, version=None, mode=None, mask=None,
            encoding=None, eci=False, boost_error=True):
    return make(content, error=error, version=version, mode=mode, mask=mask,
                encoding=encoding, eci=eci, micro=False,
                boost_error=boost_error)
Creates a QR Code (never a Micro QR Code). See :py:func:`make` for a description of the parameters. :rtype: QRCode
377,407
def get_or_create_model_key(self):
    model_cache_info = model_cache_backend.retrieve_model_cache_info(
        self.model._meta.db_table)
    if not model_cache_info:
        return uuid.uuid4().hex, True
    return model_cache_info.table_key, False
Get or create key for the model. Returns ~~~~~~~ (model_key, boolean) tuple
377,408
def compile_pycos(toc):
    global BUILDPATH
    basepath = os.path.join(BUILDPATH, "localpycos")
    new_toc = []
    for (nm, fnm, typ) in toc:
        # Trim the terminal "c" or "o" to get the .py source name.
        source_fnm = fnm[:-1]
        ext = os.path.splitext(fnm)[1]
        if "__init__" not in fnm:
            # Normal module: last part of the qualified name is the module
            # name, the preceding parts form the leading path.
            leading, mod_name = nm.split(".")[:-1], nm.split(".")[-1]
        else:
            # __init__ module: the whole qualified name is the leading path.
            leading, mod_name = nm.split("."), "__init__"
        leading = os.path.join(basepath, *leading)
        if not os.path.exists(leading):
            os.makedirs(leading)
        fnm = os.path.join(leading, mod_name + ext)
        py_compile.compile(source_fnm, fnm)
        new_toc.append((nm, fnm, typ))
    return new_toc
Given a TOC or equivalent list of tuples, generates all the required pyc/pyo files, writing in a local directory if required, and returns the list of tuples with the updated pathnames.
377,409
def contents(self):
    c = self._header[:]
    c.append(.format(self.font_weight))
    c.append(.format(self.font_family))
    c.append(.format(*self.screen_size))
    sclw = self.original_size[0] * self.scale_factor
    sclh = self.original_size[1] * self.scale_factor
    longside = max([sclw, sclh])
    width = round(longside + self.margin * 2, 2)
    height = round(longside + self.margin * 2, 2)
    xleft = round(-self.margin - (longside - sclw) / 2, 2)
    ytop = round(-self.margin - (longside - sclh) / 2, 2)
    c.append(.format(xleft, ytop, width, height))
    if self.bgcolor is not None:
        c.append(.format(xleft, ytop, width, height, self.bgcolor))
    c.extend(self._elems)
    c.append("</svg>")
    return "".join(c)
Get svg string
377,410
def _seg(chars):
    s = ''
    ret = []
    flag = 0
    for n, c in enumerate(chars):
        if RE_HANS.match(c):
            if n == 0:
                flag = 0
            if flag == 0:
                s += c
            else:
                ret.append(s)
                flag = 0
                s = c
        else:
            if n == 0:
                flag = 1
            if flag == 1:
                s += c
            else:
                ret.append(s)
                flag = 1
                s = c
    ret.append(s)
    return ret
Segment the input according to whether each character is a Chinese (Han) character.
377,411
def delete(ctx, short_name):
    wva = get_wva(ctx)
    subscription = wva.get_subscription(short_name)
    subscription.delete()
Delete a specific subscription by short name
377,412
def transform_audio(self, y):
    data = super(TempoScale, self).transform_audio(y)
    data['temposcale'] = np.abs(fmt(data.pop('tempogram'), axis=1,
                                    n_fmt=self.n_fmt)).astype(np.float32)[self.idx]
    return data
Apply the scale transform to the tempogram Parameters ---------- y : np.ndarray The audio buffer Returns ------- data : dict data['temposcale'] : np.ndarray, shape=(n_frames, n_fmt) The scale transform magnitude coefficients
377,413
def default_namespace(self, value):
    if value is not None:
        assert type(value) is unicode, \
            "'{0}' attribute: '{1}' type is not 'unicode'!".format(
                "default_namespace", value)
    self.__default_namespace = value
Setter for **self.__default_namespace** attribute. :param value: Attribute value. :type value: unicode
377,414
def handle_msg(self, msg):
    if msg.type == BGP_MSG_KEEPALIVE:
        if self.state.bgp_state == const.BGP_FSM_OPEN_CONFIRM:
            self.state.bgp_state = const.BGP_FSM_ESTABLISHED
            self._enqueue_init_updates()
    elif msg.type == BGP_MSG_UPDATE:
        assert self.state.bgp_state == const.BGP_FSM_ESTABLISHED
        self._handle_update_msg(msg)
    elif msg.type == BGP_MSG_ROUTE_REFRESH:
        assert self.state.bgp_state == const.BGP_FSM_ESTABLISHED
        self._handle_route_refresh_msg(msg)
    else:
        raise ValueError('Invalid message %s received in state %s' %
                         (msg, self.state.bgp_state))
BGP message handler. BGP message handling is shared between protocol instance and peer. Peer only handles limited messages under suitable state. Here we handle KEEPALIVE, UPDATE and ROUTE_REFRESH messages. UPDATE and ROUTE_REFRESH messages are handled only after session is established.
377,415
def get_price(item):
    the_price = "No Default Pricing"
    for price in item.get('prices', []):
        if not price.get('locationGroupId'):
            the_price = "%0.4f" % float(price['recurringFee'])
    return the_price
Finds the price with the default locationGroupId
377,416
def gevent_monkey_patch_report(self):
    try:
        import gevent.socket
        import socket
        if gevent.socket.socket is socket.socket:
            self.log("gevent monkey patching is active")
            return True
        else:
            self.notify_user("gevent monkey patching failed.")
    except ImportError:
        self.notify_user("gevent is not installed, monkey patching failed.")
    return False
Report effective gevent monkey patching on the logs.
377,417
def _format_job_instance(job):
    ret = {'Function': job.get('fun', 'unknown-function'),
           'Arguments': salt.utils.json.loads(job.get('arg', '[]')),
           'Target': job.get('tgt', 'unknown-target'),
           'Target-type': job.get('tgt_type', 'list'),
           'User': job.get('user', 'root')}
    return ret
Format the job instance correctly
377,418
def size(self, source):
    result = []
    for src in self.source_expand(source):
        size = 0
        for f in self.s3walk(src):
            size += f['size']
        result.append((src, size))
    return result
Get the size component of the given s3url. If it is a directory, combine the sizes of all the files under that directory. Subdirectories will not be counted unless --recursive option is set.
377,419
def trace(function, *args, **k):
    if doTrace:
        print("> " + function.__name__, args, k)
    result = function(*args, **k)
    if doTrace:
        print("< " + function.__name__, args, k, "->", result)
    return result
Decorates a function by tracing the beginning and end of the function execution, if the doTrace global is True.
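A throwaway usage sketch, assuming a module-level doTrace flag as the docstring describes; add is a hypothetical example function:

doTrace = True

def add(a, b):
    return a + b

result = trace(add, 2, 3)
# prints "> add (2, 3) {}" and "< add (2, 3) {} -> 5", then returns 5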
377,420
def list(self, *args, **kwargs):
    greedy = kwargs.pop('greedy', False)
    resp = self.client.api.networks(*args, **kwargs)
    networks = [self.prepare_model(item) for item in resp]
    if greedy and version_gte(self.client.api._version, '1.28'):
        for net in networks:
            net.reload()
    return networks
List networks. Similar to the ``docker networks ls`` command. Args: names (:py:class:`list`): List of names to filter by. ids (:py:class:`list`): List of ids to filter by. filters (dict): Filters to be processed on the network list. Available filters: - ``driver=[<driver-name>]`` Matches a network's driver. - ``label=[<key>]`` or ``label=[<key>=<value>]``. - ``type=["custom"|"builtin"]`` Filters networks by type. greedy (bool): Fetch more details for each network individually. You might want this to get the containers attached to them. Returns: (list of :py:class:`Network`) The networks on the server. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
377,421
def step1ab(self):
    if self.b[self.k] == "s":
        if self.ends("sses"):
            self.k = self.k - 2
        elif self.ends("ies"):
            self.setto("i")
        elif self.b[self.k - 1] != "s":
            self.k = self.k - 1
    if self.ends("eed"):
        if self.m() > 0:
            self.k = self.k - 1
    elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
        self.k = self.j
        if self.ends("at"):
            self.setto("ate")
        elif self.ends("bl"):
            self.setto("ble")
        elif self.ends("iz"):
            self.setto("ize")
        elif self.doublec(self.k):
            self.k = self.k - 1
            ch = self.b[self.k]
            if ch == "l" or ch == "s" or ch == "z":
                self.k = self.k + 1
        elif self.m() == 1 and self.cvc(self.k):
            self.setto("e")
step1ab() gets rid of plurals and -ed or -ing. e.g. caresses -> caress ponies -> poni ties -> ti caress -> caress cats -> cat feed -> feed agreed -> agree disabled -> disable matting -> mat mating -> mate meeting -> meet milling -> mill messing -> mess meetings -> meet
377,422
def _compute_zs_mat(sz: TensorImageSize, scale: float, squish: float,
                    invert: bool, row_pct: float, col_pct: float) -> AffineMatrix:
    "Utility routine to compute zoom/squish matrix."
    orig_ratio = math.sqrt(sz[1] / sz[0])
    for s, r, i in zip(scale, squish, invert):
        s, r = 1 / math.sqrt(s), math.sqrt(r)
        if s * r <= 1 and s / r <= 1:
            w, h = (s / r, s * r) if i else (s * r, s / r)
            col_c = (1 - w) * (2 * col_pct - 1)
            row_c = (1 - h) * (2 * row_pct - 1)
            return _get_zoom_mat(w, h, col_c, row_c)
    if orig_ratio > 1:
        return _get_zoom_mat(1 / orig_ratio ** 2, 1, 0, 0.)
    else:
        return _get_zoom_mat(1, orig_ratio ** 2, 0, 0.)
Utility routine to compute zoom/squish matrix.
377,423
def _set_data(self, data, offset=None, copy=False):
    data = np.array(data, copy=copy)
    data = self._normalize_shape(data)
    if offset is None:
        self._resize(data.shape)
    elif all([i == 0 for i in offset]) and data.shape == self._shape:
        self._resize(data.shape)
    offset = offset or tuple([0 for i in range(self._ndim)])
    assert len(offset) == self._ndim
    for i in range(len(data.shape) - 1):
        if offset[i] + data.shape[i] > self._shape[i]:
            raise ValueError("Data is too large")
    self._glir.command('DATA', self._id, offset, data)
Internal method for set_data.
377,424
def _generate_struct_deserializer(self, struct):
    struct_name = fmt_class_prefix(struct)
    with self.block_func(
            func=,
            args=fmt_func_args_declaration([(, )]),
            return_type=.format(struct_name),
            class_func=True):
        if not struct.all_fields and not struct.has_enumerated_subtypes():
            self.emit()

        def emit_struct_deserialize_logic(struct):
            for field in struct.all_fields:
                data_type, nullable = unwrap_nullable(field.data_type)
                input_value = .format(field.name)
                if is_primitive_type(data_type):
                    deserialize_call = input_value
                else:
                    deserialize_call = self._fmt_serialization_call(
                        field.data_type, input_value, False)
                if nullable or field.has_default:
                    default_value = (fmt_default_value(field)
                                     if field.has_default else )
                    if is_primitive_type(data_type):
                        deserialize_call = .format(input_value, default_value)
                    else:
                        deserialize_call = .format(
                            input_value, deserialize_call, default_value)
                self.emit(.format(
                    fmt_type(field.data_type), fmt_var(field.name),
                    deserialize_call))
            self.emit()
            deserialized_obj_args = [(fmt_var(f.name), fmt_var(f.name))
                                     for f in struct.all_fields]
            init_call = fmt_func_call(
                caller=fmt_alloc_call(caller=struct_name),
                callee=self._cstor_name_from_fields(struct.all_fields),
                args=fmt_func_args(deserialized_obj_args))
            self.emit(.format(init_call))

        if not struct.has_enumerated_subtypes():
            emit_struct_deserialize_logic(struct)
        else:
            for tags, subtype in struct.get_all_subtypes_with_tags():
                assert len(tags) == 1, tags
                tag = tags[0]
                base_string =
                with self.block(base_string.format(tag)):
                    caller = fmt_serial_class(fmt_class_prefix(subtype))
                    args = fmt_func_args([(, )])
                    deserialize_call = fmt_func_call(
                        caller=caller, callee=, args=args)
                    self.emit(.format(deserialize_call))
                self.emit()
            if struct.is_catch_all():
                emit_struct_deserialize_logic(struct)
            else:
                description_str = ( )
                self._generate_throw_error(, description_str)
    self.emit()
Emits the deserialize method for the serialization object for the given struct.
377,425
def read_string(self, len):
    format = '!' + str(len) + 's'
    length = struct.calcsize(format)
    info = struct.unpack(format, self.data[self.offset:self.offset + length])
    self.offset += length
    return info[0]
Reads a string of a given length from the packet
377,426
def __dtw_calc_accu_cost(C, D, D_steps, step_sizes_sigma,
                         weights_mul, weights_add, max_0, max_1):
    for cur_n in range(max_0, D.shape[0]):
        for cur_m in range(max_1, D.shape[1]):
            for cur_step_idx, cur_w_add, cur_w_mul in zip(
                    range(step_sizes_sigma.shape[0]), weights_add,
                    weights_mul):
                cur_D = D[cur_n - step_sizes_sigma[cur_step_idx, 0],
                          cur_m - step_sizes_sigma[cur_step_idx, 1]]
                cur_C = cur_w_mul * C[cur_n - max_0, cur_m - max_1]
                cur_C += cur_w_add
                cur_cost = cur_D + cur_C
                if cur_cost < D[cur_n, cur_m]:
                    D[cur_n, cur_m] = cur_cost
                    D_steps[cur_n, cur_m] = cur_step_idx
    return D, D_steps
Calculate the accumulated cost matrix D. Use dynamic programming to calculate the accumulated costs. Parameters ---------- C : np.ndarray [shape=(N, M)] pre-computed cost matrix D : np.ndarray [shape=(N, M)] accumulated cost matrix D_steps : np.ndarray [shape=(N, M)] steps which were used for calculating D step_sizes_sigma : np.ndarray [shape=[n, 2]] Specifies allowed step sizes as used by the dtw. weights_add : np.ndarray [shape=[n, ]] Additive weights to penalize certain step sizes. weights_mul : np.ndarray [shape=[n, ]] Multiplicative weights to penalize certain step sizes. max_0 : int maximum number of steps in step_sizes_sigma in dim 0. max_1 : int maximum number of steps in step_sizes_sigma in dim 1. Returns ------- D : np.ndarray [shape=(N,M)] accumulated cost matrix. D[N,M] is the total alignment cost. When doing subsequence DTW, D[N,:] indicates a matching function. D_steps : np.ndarray [shape=(N,M)] steps which were used for calculating D. See Also -------- dtw
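The triple loop implements the standard accumulated-cost recurrence; in the code's notation each cell is filled as

\[
D(n, m) = \min_{i} \Big[ D\big(n - \sigma_{i,0},\, m - \sigma_{i,1}\big) + w^{\mathrm{mul}}_{i}\, C\big(n - \mathrm{max}_0,\, m - \mathrm{max}_1\big) + w^{\mathrm{add}}_{i} \Big]
\]

with sigma the rows of step_sizes_sigma and D_steps recording the minimizing step index.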
377,427
def split_grads_by_size(threshold_size, device_grads):
    small_grads = []
    large_grads = []
    for dl in device_grads:
        small_dl = []
        large_dl = []
        for (g, v) in dl:
            tensor_size = g.get_shape().num_elements()
            if tensor_size <= threshold_size:
                small_dl.append([g, v])
            else:
                large_dl.append([g, v])
        if small_dl:
            small_grads.append(small_dl)
        if large_dl:
            large_grads.append(large_dl)
    return small_grads, large_grads
Break gradients into two sets according to tensor size. Args: threshold_size: int size cutoff for small vs large tensor. device_grads: List of lists of (gradient, variable) tuples. The outer list is over devices. The inner list is over individual gradients. Returns: small_grads: Subset of device_grads where shape is <= threshold_size elements. large_grads: Subset of device_grads where shape is > threshold_size elements.
377,428
def fuse_batchnorm_weights(gamma, beta, mean, var, epsilon):
    scale = gamma / np.sqrt(var + epsilon)
    bias = beta - gamma * mean / np.sqrt(var + epsilon)
    return [scale, bias]
float sqrt_var = sqrt(var_data[i]);
a_data[i] = bias_data[i] - slope_data[i] * mean_data[i] / sqrt_var;
b_data[i] = slope_data[i] / sqrt_var;
...
ptr[i] = b * ptr[i] + a;
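A minimal sketch of what the fused pair is for: at inference time the batchnorm collapses into a single affine op. The toy numbers are illustrative only:

import numpy as np

gamma, beta = np.array([1.5, 0.8]), np.array([0.1, -0.2])
mean, var, eps = np.array([0.3, -0.1]), np.array([0.9, 1.2]), 1e-5

scale, bias = fuse_batchnorm_weights(gamma, beta, mean, var, eps)

x = np.random.randn(4, 2)
bn = gamma * (x - mean) / np.sqrt(var + eps) + beta  # reference batchnorm
fused = scale * x + bias                             # single affine op
assert np.allclose(bn, fused)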
377,429
def _unpickle_channel(raw):
    try:
        return pickle.loads(raw)
    except (ValueError, pickle.UnpicklingError, EOFError, TypeError,
            IndexError) as exc:
        if isinstance(raw, bytes):
            raw = raw.decode()
        try:
            Channel.MATCH.match(raw)
        except ValueError:
            raise exc
        return raw
Try and unpickle a channel with sensible error handling
377,430
def getCert(username, password, certHost=_SERVER, certfile=None,
            certQuery=_PROXY):
    if certfile is None:
        certfile = tempfile.NamedTemporaryFile()
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    top_level_url = "http://" + certHost
    logging.debug(top_level_url)
    password_mgr.add_password(None, top_level_url, username, password)
    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
    logging.debug(str(handler))
    opener = urllib2.build_opener(handler)
    urllib2.install_opener(opener)
    url = "http://" + certHost + certQuery
    logging.debug(url)
    r = None
    try:
        r = opener.open(url)
    except urllib2.HTTPError as e:
        logging.debug(url)
        logging.debug(str(e))
        return False
    logging.debug(str(r))
    if r is not None:
        while True:
            buf = r.read()
            logging.debug(buf)
            if not buf:
                break
            certfile.write(buf)
        r.close()
    return certfile
Access the cadc certificate server.
377,431
def _process_health_pill_value(self, wall_time, step, device_name,
                               output_slot, node_name, tensor_proto,
                               node_name_set=None):
    if node_name_set and node_name not in node_name_set:
        return None
    elements = list(tensor_util.make_ndarray(tensor_proto))
    return HealthPillEvent(
        wall_time=wall_time,
        step=step,
        device_name=device_name,
        output_slot=output_slot,
        node_name=node_name,
        dtype=repr(tf.as_dtype(elements[12])),
        shape=elements[14:],
        value=elements)
Creates a HealthPillEvent containing various properties of a health pill. Args: wall_time: The wall time in seconds. step: The session run step of the event. device_name: The name of the node's device. output_slot: The numeric output slot. node_name: The name of the node (without the output slot). tensor_proto: A tensor proto of data. node_name_set: An optional set of node names that are relevant. If not provided, no filtering by relevance occurs. Returns: An event_accumulator.HealthPillEvent. Or None if one could not be created.
377,432
def iplot_histogram(data, figsize=None, number_to_keep=None, sort='asc',
                    legend=None):
    html_template = Template()
    javascript_template = Template()
    div_number = str(time.time())
    div_number = re.sub('[.]', '', div_number)
    if figsize is None:
        figsize = (7, 5)
    options = {'number_to_keep': 0 if number_to_keep is None else number_to_keep,
               'sort': sort,
               'show_legend': 0,
               'width': int(figsize[0]),
               'height': int(figsize[1])}
    if legend:
        options['show_legend'] = 1
    data_to_plot = []
    if isinstance(data, dict):
        data = [data]
    if legend and len(legend) != len(data):
        raise VisualizationError("Length of legend (%s) doesn't match number "
                                 "of data (%s)" % (len(legend), len(data)))
    for item, execution in enumerate(data):
        exec_data = process_data(execution, options['number_to_keep'])
        out_dict = {'data': exec_data}
        if legend:
            out_dict['name'] = legend[item]
        data_to_plot.append(out_dict)
    html = html_template.substitute({'divNumber': div_number})
    javascript = javascript_template.substitute({'divNumber': div_number,
                                                 'executions': data_to_plot,
                                                 'options': options})
    display(HTML(html + javascript))
Create a histogram representation. Graphical representation of the input array using a vertical bars style graph. Args: data (list or dict): This is either a list of dicts or a single dict containing the values to represent (ex. {'001' : 130}) figsize (tuple): Figure size in pixels. number_to_keep (int): The number of terms to plot and rest is made into a single bar called other values sort (string): Could be 'asc' or 'desc' legend (list): A list of strings to use for labels of the data. The number of entries must match the length of data. Raises: VisualizationError: When legend is provided and the length doesn't match the input data.
377,433
def new_data(self, mem, addr, data):
    done = False
    if mem.id == self.id:
        if addr == LocoMemory.MEM_LOCO_INFO:
            self.nr_of_anchors = data[0]
            if self.nr_of_anchors == 0:
                done = True
            else:
                self.anchor_data = \
                    [AnchorData() for _ in range(self.nr_of_anchors)]
                self._request_page(0)
        else:
            page = int((addr - LocoMemory.MEM_LOCO_ANCHOR_BASE) /
                       LocoMemory.MEM_LOCO_ANCHOR_PAGE_SIZE)
            self.anchor_data[page].set_from_mem_data(data)
            next_page = page + 1
            if next_page < self.nr_of_anchors:
                self._request_page(next_page)
            else:
                done = True
    if done:
        self.valid = True
        if self._update_finished_cb:
            self._update_finished_cb(self)
            self._update_finished_cb = None
Callback for when new memory data has been fetched
377,434
def nvmlDeviceSetEccMode(handle, mode):
    fn = _nvmlGetFunctionPointer("nvmlDeviceSetEccMode")
    ret = fn(handle, _nvmlEnableState_t(mode))
    _nvmlCheckReturn(ret)
    return None
r""" /** * Set the ECC mode for the device. * * For Kepler &tm; or newer fully supported devices. * Only applicable to devices with ECC. * Requires \a NVML_INFOROM_ECC version 1.0 or higher. * Requires root/admin permissions. * * The ECC mode determines whether the GPU enables its ECC support. * * This operation takes effect after the next reboot. * * See \ref nvmlEnableState_t for details on available modes. * * @param device The identifier of the target device * @param ecc The target ECC mode * * @return * - \ref NVML_SUCCESS if the ECC mode was set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a ecc is invalid * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see nvmlDeviceGetEccMode() */ nvmlReturn_t DECLDIR nvmlDeviceSetEccMode
377,435
def opened(self, block_identifier: BlockSpecification) -> bool:
    return self.token_network.channel_is_opened(
        participant1=self.participant1,
        participant2=self.participant2,
        block_identifier=block_identifier,
        channel_identifier=self.channel_identifier,
    )
Returns if the channel is opened.
377,436
def buildcontent(self):
    self.buildcontainer()
    self.buildjschart()
    self.htmlcontent = self.template_content_nvd3.render(chart=self)
Build HTML content only, no header or body tags. To be useful this will usually require the attribute `jquery_on_ready` to be set, which will wrap the js in $(function(){<regular_js>};)
377,437
def basic_stats(self):
    comment_score = sum(comment.score for comment in self.comments)
    if self.comments:
        comment_duration = (self.comments[-1].created_utc -
                            self.comments[0].created_utc)
        comment_rate = self._rate(len(self.comments), comment_duration)
    else:
        comment_rate = 0
    submission_duration = self.max_date - self.min_date
    submission_rate = self._rate(len(self.submissions), submission_duration)
    submission_score = sum(sub.score for sub in self.submissions.values())
    values = [('Total', len(self.submissions), len(self.comments)),
              ('Rate (per day)', '{:.2f}'.format(submission_rate),
               '{:.2f}'.format(comment_rate)),
              ('Unique Redditors', len(self.submitters), len(self.commenters)),
              ('Combined Score', submission_score, comment_score)]
    retval = 'Period: {:.2f} days\n\n'.format(submission_duration / 86400.)
    retval += '||Submissions|Comments|\n:-:|--:|--:\n'
    for quad in values:
        retval += '**{}**|{}|{}\n'.format(*quad)
    return retval + '\n'
Return a markdown representation of simple statistics.
377,438
def _create_ring(self, nodes):
    for node_name, node_conf in nodes:
        for w in range(0, node_conf['vnodes'] * node_conf['weight']):
            self._distribution[node_name] += 1
            self._ring[self.hashi('%s-%s' % (node_name, w))] = node_name
    self._keys = sorted(self._ring.keys())
Generate a ketama compatible continuum/ring.
377,439
def visualRect(self, index):
    rect = super(XTreeWidget, self).visualRect(index)
    item = self.itemFromIndex(index)
    if not rect.isNull() and item and item.isFirstColumnSpanned():
        vpos = self.viewport().mapFromParent(QtCore.QPoint(0, 0))
        rect.setX(vpos.x())
        rect.setWidth(self.width())
        return rect
    return rect
Returns the visual rectangle for the given index. :param index | <QModelIndex> :return <QtCore.QRect>
377,440
def zero_level_calibrate(self, duration, t0=0.0):
    t1 = t0 + duration
    indices = np.flatnonzero((self.timestamps >= t0) & (self.timestamps <= t1))
    m = np.mean(self.gyro_data[:, indices], axis=1)
    self.gyro_data -= m.reshape(3, 1)
    return self.gyro_data
Performs zero-level calibration from the chosen time interval. This changes the previously loaded data in-place. Parameters -------------------- duration : float Number of timeunits to use for calibration t0 : float Starting time for calibration Returns ---------------------- gyro_data : (3, N) float ndarray The calibrated data (note that it is also changed in-place!)
377,441
def inference(self, observed_arr):
    if observed_arr.ndim < 4:
        observed_arr = np.expand_dims(observed_arr, axis=1)
        self.__add_channel_flag = True
    else:
        self.__add_channel_flag = False
    return super().inference(observed_arr)
Draws samples from the `true` distribution. Args: observed_arr: `np.ndarray` of observed data points. Returns: `np.ndarray` of inferenced.
377,442
def start(self, *args, **kwargs):
    args = (self.counter,) + args
    thread = threading.Thread(
        target=self._work_callback,
        args=args, kwargs=kwargs
    )
    thread.setDaemon(self.daemon)
    thread.start()
Start the task. This is: * not threadsafe * assumed to be called in the gtk mainloop
377,443
def orient_directed_graph(self, data, graph):
    warnings.warn("The algorithm is run on the skeleton of the given graph.")
    return self.orient_undirected_graph(data, nx.Graph(graph))
Run the algorithm on a directed_graph. Args: data (pandas.DataFrame): DataFrame containing the data graph (networkx.DiGraph): Skeleton of the graph to orient Returns: networkx.DiGraph: Solution on the given skeleton. .. warning:: The algorithm is run on the skeleton of the given graph.
377,444
def nvmlUnitGetHandleByIndex(index):
    c_index = c_uint(index)
    unit = c_nvmlUnit_t()
    fn = _nvmlGetFunctionPointer("nvmlUnitGetHandleByIndex")
    ret = fn(c_index, byref(unit))
    _nvmlCheckReturn(ret)
    return bytes_to_str(unit)
r""" /** * Acquire the handle for a particular unit, based on its index. * * For S-class products. * * Valid indices are derived from the \a unitCount returned by \ref nvmlUnitGetCount(). * For example, if \a unitCount is 2 the valid indices are 0 and 1, corresponding to UNIT 0 and UNIT 1. * * The order in which NVML enumerates units has no guarantees of consistency between reboots. * * @param index The index of the target unit, >= 0 and < \a unitCount * @param unit Reference in which to return the unit handle * * @return * - \ref NVML_SUCCESS if \a unit has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlUnitGetHandleByIndex
377,445
def figure_tensor(func, **tf_pyfunc_kwargs):
    name = tf_pyfunc_kwargs.pop('name', func.__name__)

    @wraps(func)
    def wrapper(*func_args, **func_kwargs):
        tf_args = PositionalTensorArgs(func_args)

        def pyfnc_callee(*tensor_values, **unused):
            try:
                figs = as_list(func(*tf_args.mix_args(tensor_values),
                                    **func_kwargs))
                for f in figs:
                    f.canvas.draw()
                return figure_buffer(figs)
            except Exception:
                print('-' * 5 + ' tfmpl caught exception ' + '-' * 5)
                print(traceback.format_exc())
                print('-' * 20)
                raise

        return tf.py_func(pyfnc_callee, tf_args.tensor_args, tf.uint8,
                          name=name, **tf_pyfunc_kwargs)
    return wrapper
Decorate matplotlib drawing routines. This decorator is meant to decorate functions that return matplotlib figures. The decorated function has to have the following signature def decorated(*args, **kwargs) -> figure or iterable of figures where `*args` can be any positional argument and `**kwargs` are any keyword arguments. The decorated function returns a tensor of shape `[NumFigures, Height, Width, 3]` of type `tf.uint8`. The drawing code is invoked during running of TensorFlow sessions, at a time when all positional tensor arguments have been evaluated by the session. The decorated function is then passed the tensor values. All non tensor arguments remain unchanged.
377,446
def _check_feature_types(self):
    if self.default_feature_type is not None and \
            self.default_feature_type not in self.allowed_feature_types:
        raise ValueError('Default feature type parameter must be one of the '
                         'allowed feature types')
    for feature_type in self.feature_collection:
        if feature_type is not None and \
                feature_type not in self.allowed_feature_types:
            raise ValueError('Allowed feature types were set to be {} but '
                             '{} found'.format(self.allowed_feature_types,
                                               feature_type))
Checks that feature types are a subset of allowed feature types. (`None` is handled separately.) :raises: ValueError
377,447
def update(self):
    stats = self.get_init_value()
    if self.input_method == 'local':
        for k, v in iteritems(self.glances_amps.update()):
            stats.append({'key': k,
                          'name': v.NAME,
                          'result': v.result(),
                          'refresh': v.refresh(),
                          'timer': v.time_until_refresh(),
                          'count': v.count(),
                          'countmin': v.count_min(),
                          'countmax': v.count_max()})
    else:
        pass
    self.stats = stats
    return self.stats
Update the AMP list.
377,448
def sync_objects_out(self, force=False):
    self.log('Sync objects out')
    from ambry.bundle.files import BuildSourceFile
    self.dstate = self.STATES.BUILDING
    for f in self.build_source_files.list_records():
        self.log('Sync out: {}'.format(f.record.path))
        f.objects_to_record()
    self.commit()
Synchronize from objects to records, and records to files
377,449
def to_example_dict(encoder, inputs, mask, outputs):
    bases = []
    input_ids = []
    last_idx = -1
    for row in np.argwhere(inputs):
        idx, base_id = row
        idx, base_id = int(idx), int(base_id)
        assert idx > last_idx
        while idx != last_idx + 1:
            bases.append(encoder.UNK)
            last_idx += 1
        bases.append(encoder.BASES[base_id])
        last_idx = idx
    assert len(inputs) == len(bases)
    input_ids = encoder.encode(bases)
    input_ids.append(text_encoder.EOS_ID)
    targets_mask = [float(v) for v in mask]
    targets = [float(v) for v in outputs.flatten()]
    targets_shape = [int(dim) for dim in outputs.shape]
    assert mask.shape[0] == outputs.shape[0]
    example_keys = ["inputs", "targets_mask", "targets", "targets_shape"]
    ex_dict = dict(
        zip(example_keys, [input_ids, targets_mask, targets, targets_shape]))
    return ex_dict
Convert single h5 record to an example dict.
377,450
def add_rednoise(psr, A, gamma, components=10, seed=None):
    if seed is not None:
        N.random.seed(seed)
    t = psr.toas()
    minx, maxx = N.min(t), N.max(t)
    x = (t - minx) / (maxx - minx)
    T = (day / year) * (maxx - minx)
    size = 2 * components
    F = N.zeros((psr.nobs, size), 'd')
    f = N.zeros(size, 'd')
    for i in range(components):
        F[:, 2 * i] = N.cos(2 * math.pi * (i + 1) * x)
        F[:, 2 * i + 1] = N.sin(2 * math.pi * (i + 1) * x)
        f[2 * i] = f[2 * i + 1] = (i + 1) / T
    norm = A ** 2 * year ** 2 / (12 * math.pi ** 2 * T)
    prior = norm * f ** (-gamma)
    y = N.sqrt(prior) * N.random.randn(size)
    psr.stoas[:] += (1.0 / day) * N.dot(F, y)
Add red noise with P(f) = A^2 / (12 pi^2) (f year)^-gamma, using `components` Fourier bases. Optionally take a pseudorandom-number-generator seed.
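The docstring's power spectrum, set out in display form:

\[
P(f) = \frac{A^2}{12\pi^2}\,(f\,\mathrm{yr})^{-\gamma}
\]

The code draws each Fourier coefficient with variance proportional to this prior evaluated at the basis frequency f = (i+1)/T.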
377,451
def get_getter(cls, prop_name, user_getter=None, getter_takes_name=False):
    if user_getter:
        if getter_takes_name:
            _deps = type(cls)._get_old_style_getter_deps(cls, prop_name,
                                                         user_getter)
            def _getter(self, deps=_deps):
                return user_getter(self, prop_name)
        else:
            _getter = user_getter
        return _getter

    def _getter(self):
        return getattr(self, PROP_NAME % {'prop_name': prop_name})
    return _getter
Returns a function which is a getter for a property. prop_name is the name of the property. user_getter is an optional function doing the work. If specified, that function will be called instead of getting the attribute whose name is in 'prop_name'. If user_getter is specified with a False value for getter_takes_name (default), then the method is used to get the value of the property. If True is specified for getter_takes_name, then the user_getter is called by passing the property name (i.e. it is considered a general method which receives the property name whose value has to be returned.)
377,452
def validate(self, **kwargs):
    default_data_schema = json.load(open(self.default_schema_file, 'r'))
    data = kwargs.pop("data", None)
    file_path = kwargs.pop("file_path", None)
    if file_path is None:
        raise LookupError("file_path argument must be supplied")
    if data is None:
        try:
            data = yaml.load(open(file_path, 'r'), Loader=Loader)
        except Exception as e:
            self.add_validation_message(ValidationMessage(
                file=file_path,
                message='There was a problem parsing the file.\n' + e.__str__()))
            return False
    try:
        if 'type' in data:
            custom_schema = self.load_custom_schema(data['type'])
            json_validate(data, custom_schema)
        else:
            json_validate(data, default_data_schema)
    except ValidationError as ve:
        self.add_validation_message(ValidationMessage(
            file=file_path,
            message=ve.message + ' in ' + str(ve.instance)))
    if self.has_errors(file_path):
        return False
    else:
        return True
Validates a data file :param file_path: path to file to be loaded. :param data: pre loaded YAML object (optional). :return: Bool to indicate the validity of the file.
377,453
def apply(self, data, path=None, applicator=None):
    if applicator:
        applicator.pset = self
    else:
        applicator = Applicator(self)
    return applicator.apply(data, path=path)
Apply permissions in this set to the provided data, effectively removing all keys from it that are not permissioned to be viewed Arguments: data -- dict of data Returns: Cleaned data
377,454
def parse_iso8601(text):
    parsed = _parse_iso8601_duration(text)
    if parsed is not None:
        return parsed

    m = ISO8601_DT.match(text)
    if not m:
        raise ParserError("Invalid ISO 8601 string")

    ambiguous_date = False
    is_date = False
    is_time = False
    year = 0
    month = 1
    day = 1
    minute = 0
    second = 0
    microsecond = 0
    tzinfo = None

    if m:
        if m.group("date"):
            is_date = True
            if m.group("isocalendar"):
                if (
                    m.group("weeksep")
                    and not m.group("weekdaysep")
                    and m.group("isoweekday")
                ):
                    raise ParserError("Invalid date string: {}".format(text))
                if not m.group("weeksep") and m.group("weekdaysep"):
                    raise ParserError("Invalid date string: {}".format(text))
                try:
                    date = _get_iso_8601_week(
                        m.group("isoyear"), m.group("isoweek"),
                        m.group("isoweekday")
                    )
                except ParserError:
                    raise
                except ValueError:
                    raise ParserError("Invalid date string: {}".format(text))
                year = date["year"]
                month = date["month"]
                day = date["day"]
            else:
                year = int(m.group("year"))
                if not m.group("monthday"):
                    month = 1
                    day = 1
                else:
                    if m.group("month") and m.group("day"):
                        if not m.group("daysep") and len(m.group("day")) == 1:
                            # Ordinal day of year
                            ordinal = int(m.group("month") + m.group("day"))
                            leap = is_leap(year)
                            months_offsets = MONTHS_OFFSETS[leap]
                            if ordinal > months_offsets[13]:
                                raise ParserError("Ordinal day is out of range")
                            for i in range(1, 14):
                                if ordinal <= months_offsets[i]:
                                    day = ordinal - months_offsets[i - 1]
                                    month = i - 1
                                    break
                        else:
                            month = int(m.group("month"))
                            day = int(m.group("day"))
                    else:
                        if not m.group("monthsep"):
                            ambiguous_date = True
                        month = int(m.group("month"))
                        day = 1

        if not m.group("time"):
            if ambiguous_date:
                hhmmss = "{}{:0>2}".format(str(year), str(month))
                return datetime.time(int(hhmmss[:2]), int(hhmmss[2:4]),
                                     int(hhmmss[4:]))
            return datetime.date(year, month, day)

        if ambiguous_date:
            raise ParserError("Invalid date string: {}".format(text))

        if is_date and not m.group("timesep"):
            raise ParserError("Invalid date string: {}".format(text))

        if not is_date:
            is_time = True

        hour = int(m.group("hour"))
        minsep = m.group("minsep")

        if m.group("minute"):
            minute = int(m.group("minute"))
        elif minsep:
            raise ParserError("Invalid ISO 8601 time part")

        secsep = m.group("secsep")
        if secsep and not minsep and m.group("minute"):
            raise ParserError("Invalid ISO 8601 time part")

        if m.group("second"):
            if not secsep and minsep:
                raise ParserError("Invalid ISO 8601 time part")
            second = int(m.group("second"))
        elif secsep:
            raise ParserError("Invalid ISO 8601 time part")

        if m.group("subsecondsection"):
            subsecond = m.group("subsecond")[:6]
            microsecond = int("{:0<6}".format(subsecond))

        tz = m.group("tz")
        if tz:
            if tz == "Z":
                offset = 0
            else:
                negative = True if tz.startswith("-") else False
                tz = tz[1:]
                if ":" not in tz:
                    if len(tz) == 2:
                        tz = "{}00".format(tz)
                    off_hour = tz[0:2]
                    off_minute = tz[2:4]
                else:
                    off_hour, off_minute = tz.split(":")
                offset = ((int(off_hour) * 60) + int(off_minute)) * 60
                if negative:
                    offset = -1 * offset
            tzinfo = FixedTimezone(offset)

    if is_time:
        return datetime.time(hour, minute, second, microsecond)

    return datetime.datetime(
        year, month, day, hour, minute, second, microsecond, tzinfo=tzinfo
    )
ISO 8601 compliant parser. :param text: The string to parse :type text: str :rtype: datetime.datetime or datetime.time or datetime.date
377,455
def config(filename):
    Config = collections.namedtuple(, [
        , , , , , , , ,
    ])
    return [Config(**d) for d in _get_config_generator(filename)]
Construct `Config` object and return a list. :param filename: A string containing the path to YAML file. :return: list
377,456
def process_pure_python(self, content):
    output = []
    savefig = False
    multiline = False
    multiline_start = None
    fmtin = self.promptin
    ct = 0

    for lineno, line in enumerate(content):
        line_stripped = line.strip()
        if not len(line):
            output.append(line)
            continue

        # handle decorators
        if line_stripped.startswith('@'):
            output.extend([line])
            if 'savefig' in line:
                savefig = True
            continue

        # handle comments
        if line_stripped.startswith('#'):
            output.extend([line])
            continue

        # deal with lines checking for multiline
        continuation = u'   %s:' % ''.join(['.'] * (len(str(ct)) + 2))
        if not multiline:
            modified = u"%s %s" % (fmtin % ct, line_stripped)
            output.append(modified)
            ct += 1
            try:
                ast.parse(line_stripped)
                output.append(u'')
            except Exception:
                multiline = True
                multiline_start = lineno
        else:
            modified = u'%s %s' % (continuation, line)
            output.append(modified)
            try:
                mod = ast.parse('\n'.join(content[multiline_start:lineno + 1]))
                if isinstance(mod.body[0], ast.FunctionDef):
                    # check to see if we have the whole function
                    for element in mod.body[0].body:
                        if isinstance(element, ast.Return):
                            multiline = False
                else:
                    output.append(u'')
                    multiline = False
            except Exception:
                pass

        if savefig:
            self.ensure_pyplot()
            self.process_input_line('plt.clf()', store_history=False)
            self.clear_cout()
            savefig = False

    return output
content is a list of strings. It is unedited directive content. This runs it line by line in the InteractiveShell, prepends prompts as needed capturing stderr and stdout, then returns the content as a list as if it were ipython code
377,457
def config_unset(name, value_regex=None, repo=None, user=None, password=None,
                 output_encoding=None, **kwargs):
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    global_ = kwargs.pop('global', False)
    all_ = kwargs.pop('all', False)
    if kwargs:
        return _fail(
            ret,
            salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
        )

    if not global_ and not repo:
        return _fail(
            ret,
            '\'repo\' argument is required unless \'global\' is set to True'
        )

    if not isinstance(name, six.string_types):
        name = six.text_type(name)
    if value_regex is not None:
        if not isinstance(value_regex, six.string_types):
            value_regex = six.text_type(value_regex)

    # Ensure that the key regex matches the full key name
    key = '^' + name.lstrip('^').rstrip('$') + '$'

    # Get matching keys/values
    pre_matches = __salt__['git.config_get_regexp'](
        cwd=repo,
        key=key,
        value_regex=value_regex,
        user=user,
        password=password,
        ignore_retcode=True,
        output_encoding=output_encoding,
        **{'global': global_}
    )

    if not pre_matches:
        # No changes need to be made
        return ret
    return ret
r''' .. versionadded:: 2015.8.0 Ensure that the named config key is not present name The name of the configuration key to unset. This value can be a regex, but the regex must match the entire key name. For example, ``foo\.`` would not match all keys in the ``foo`` section, it would be necessary to use ``foo\..+`` to do so. value_regex Regex indicating the values to unset for the matching key(s) .. note:: This option behaves differently depending on whether or not ``all`` is set to ``True``. If it is, then all values matching the regex will be deleted (this is the only way to delete multiple values from a multivar). If ``all`` is set to ``False``, then this state will fail if the regex matches more than one value in a multivar. all : False If ``True``, unset all matches repo Location of the git repository for which the config value should be set. Required unless ``global`` is set to ``True``. user User under which to run git commands. By default, commands are run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 global : False If ``True``, this will set a global git config option output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 **Examples:** .. code-block:: yaml # Value matching 'baz' mylocalrepo: git.config_unset: - name: foo.bar - value_regex: 'baz' - repo: /path/to/repo # Ensure entire multivar is unset mylocalrepo: git.config_unset: - name: foo.bar - all: True # Ensure all variables in 'foo' section are unset, including multivars mylocalrepo: git.config_unset: - name: 'foo\..+' - all: True # Ensure that global config value is unset mylocalrepo: git.config_unset: - name: foo.bar - global: True
377,458
def simple_memoize(callable_object):
    cache = dict()

    def wrapper(*rest):
        if rest not in cache:
            cache[rest] = callable_object(*rest)
        return cache[rest]

    return wrapper
Simple memoization for functions without keyword arguments. This is useful for mapping code objects to modules in this context. inspect.getmodule() requires a number of system calls, which may slow down the tracing considerably. Caching the mapping from code objects avoids repeating those calls (there is *one* code object for each function, regardless of how many simultaneous activation records there are). In this context we can ignore keyword arguments, but a generic memoizer ought to take care of that as well.
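A throwaway usage sketch; slow_square and the call log are illustrative:

calls = []

@simple_memoize
def slow_square(n):
    calls.append(n)
    return n * n

assert slow_square(4) == 16
assert slow_square(4) == 16
assert calls == [4]  # the second call was served from the cache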
377,459
def _get_friends_count(session, user_id):
    response = session.fetch('friends.get', user_id=user_id, count=1)
    return response["count"]
https://vk.com/dev/friends.get
377,460
def get_properties(self, mode, name_list=None):
    assert mode in ("allprop", "name", "named")
    if mode in ("allprop", "name"):
        assert name_list is None
        name_list = self.get_property_names(mode == "allprop")
    else:
        assert name_list is not None
    propList = []
    namesOnly = mode == "name"
    for name in name_list:
        try:
            if namesOnly:
                propList.append((name, None))
            else:
                value = self.get_property_value(name)
                propList.append((name, value))
        except DAVError as e:
            propList.append((name, e))
        except Exception as e:
            propList.append((name, as_DAVError(e)))
            if self.provider.verbose >= 2:
                traceback.print_exc(10, sys.stdout)
    return propList
Return properties as list of 2-tuples (name, value). If mode is 'name', then None is returned for the value. name the property name in Clark notation. value may have different types, depending on the status: - string or unicode: for standard property values. - etree.Element: for complex values. - DAVError in case of errors. - None: if mode == 'name'. @param mode: "allprop", "name", or "named" @param name_list: list of property names in Clark Notation (required for mode 'named') This default implementation basically calls self.get_property_names() to get the list of names, then call self.get_property_value on each of them.
377,461
def handle_pkg_optional_fields(self, package, package_node):
    self.handle_package_literal_optional(
        package, package_node, self.spdx_namespace.versionInfo, 'version')
    self.handle_package_literal_optional(
        package, package_node, self.spdx_namespace.packageFileName,
        'file_name')
    self.handle_package_literal_optional(
        package, package_node, self.spdx_namespace.supplier, 'supplier')
    self.handle_package_literal_optional(
        package, package_node, self.spdx_namespace.originator, 'originator')
    self.handle_package_literal_optional(
        package, package_node, self.spdx_namespace.sourceInfo, 'source_info')
    self.handle_package_literal_optional(
        package, package_node, self.spdx_namespace.licenseComments,
        'license_comment')
    self.handle_package_literal_optional(
        package, package_node, self.spdx_namespace.summary, 'summary')
    self.handle_package_literal_optional(
        package, package_node, self.spdx_namespace.description, 'description')
    if package.has_optional_field('check_sum'):
        checksum_node = self.create_checksum_node(package.check_sum)
        self.graph.add((package_node, self.spdx_namespace.checksum,
                        checksum_node))
    if package.has_optional_field('homepage'):
        homepage_node = URIRef(self.to_special_value(package.homepage))
        homepage_triple = (package_node, self.doap_namespace.homepage,
                           homepage_node)
        self.graph.add(homepage_triple)
Write package optional fields.
377,462
def check_num_columns_in_param_list_arrays(param_list):
    try:
        num_columns = param_list[0].shape[1]
        assert all([x is None or (x.shape[1] == num_columns)
                    for x in param_list])
    except AssertionError:
        msg = "param_list arrays should have equal number of columns."
        raise ValueError(msg)
    return None
Ensure that each array in param_list, that is not None, has the same number of columns. Raises a helpful ValueError if otherwise. Parameters ---------- param_list : list of ndarrays or None. Returns ------- None.
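For example, with these toy arrays the check passes when column counts match and raises otherwise:

import numpy as np

ok = [np.zeros((5, 3)), None, np.ones((2, 3))]
check_num_columns_in_param_list_arrays(ok)  # returns None

bad = [np.zeros((5, 3)), np.ones((2, 4))]
try:
    check_num_columns_in_param_list_arrays(bad)
except ValueError as err:
    print(err)  # param_list arrays should have equal number of columns.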
377,463
def reverse(self, search: str):
    url_path = "/api/reverse/{search}".format(search=search)
    return self._request(path=url_path)
Return reverse DNS lookup information we have for the given IPv{4,6} address with history of changes. Multiple reverse DNS entries may match. We return all of them.
377,464
def delete_folder(self, folder_id, folder_etag=None, recursive=None):
    return self(
        join('folders', folder_id),
        dict(recursive=recursive),
        method='DELETE',
        headers={'If-Match': folder_etag} if folder_etag else dict()
    )
Delete specified folder. Pass folder_etag to avoid race conditions (raises error 412). recursive keyword does just what it says on the tin.
377,465
def sieve(self, name=None, sample_rate=None, sample_range=None,
          exact_match=False, **others):
    if isinstance(name, Pattern):
        flags = name.flags
        name = name.pattern
    else:
        flags = 0
    if exact_match:
        name = name if name.startswith(r'\A') else r"\A%s" % name
        name = name if name.endswith(r'\Z') else r"%s\Z" % name
    name_regexp = re.compile(name, flags=flags)
    matched = list(self)
    if name is not None:
        matched = [entry for entry in matched
                   if name_regexp.search(entry.name) is not None]
    if sample_rate is not None:
        sample_rate = (sample_rate.value
                       if isinstance(sample_rate, units.Quantity)
                       else float(sample_rate))
        matched = [entry for entry in matched if entry.sample_rate
                   and entry.sample_rate.value == sample_rate]
    if sample_range is not None:
        matched = [entry for entry in matched
                   if sample_range[0] <= entry.sample_rate.value
                   <= sample_range[1]]
    for attr, val in others.items():
        if val is not None:
            matched = [entry for entry in matched
                       if (hasattr(entry, attr)
                           and getattr(entry, attr) == val)]
    return self.__class__(matched)
Find all `Channels <Channel>` in this list matching the specified criteria. Parameters ---------- name : `str`, or regular expression any part of the channel name against which to match (or full name if `exact_match=False` is given) sample_rate : `float` rate (number of samples per second) to match exactly sample_range : 2-`tuple` `[low, high]` closed interval or rates to match within exact_match : `bool` return channels matching `name` exactly, default: `False` Returns ------- new : `ChannelList` a new `ChannelList` containing the matching channels
377,466
def _on_connection_close(self, connection, reply_code_or_reason,
                         reply_text=None):
    self._channel = None
    if isinstance(reply_code_or_reason, pika_errs.ConnectionClosed):
        reply_code = reply_code_or_reason.reply_code
        reply_text = reply_code_or_reason.reply_text
    elif isinstance(reply_code_or_reason, int):
        reply_code = reply_code_or_reason
    else:
        reply_code = 0
        reply_text = str(reply_code_or_reason)
    if reply_code == 200:
        _log.info("Server connection closed (%s), shutting down", reply_text)
        connection.ioloop.stop()
    else:
        _log.warning(
            "Connection to %s closed unexpectedly (%d): %s",
            connection.params.host,
            reply_code,
            reply_text,
        )
        self.call_later(1, self.reconnect)
Callback invoked when a previously-opened connection is closed. Args: connection (pika.connection.SelectConnection): The connection that was just closed. reply_code_or_reason (int|Exception): The reason why the channel was closed. In older versions of pika, this is the AMQP code. reply_text (str): The human-readable reason the connection was closed (only in older versions of pika)
377,467
def filepattern(self, *args, **kwargs):
    return [p.filepattern(*args, **kwargs) for p in self.problems]
Returns a list of filepatterns, one for each problem.
377,468
def main(argv=None):
    if argv is None:
        argv = sys.argv
    try:
        text, inFormat, outFormat = argv[1:4]
    except ValueError:
        print(main.__doc__)
        return 2
    inFormat = inFormat.upper()
    outFormat = outFormat.upper()
    # "text" may name a file; otherwise transliterate it directly.
    try:
        f = open(text)
    except IOError:
        print(transliterate(text, inFormat, outFormat))
        return 0
    i = 0
    for line in f:
        print(transliterate(line, inFormat, outFormat))
        i = i + 1
    f.close()
    return 0
Call transliterator from a command line. python transliterator.py text inputFormat outputFormat ... writes the transliterated text to stdout text -- the text to be transliterated OR the name of a file containing the text inputFormat -- the name of the character block or transliteration scheme that the text is to be transliterated FROM, e.g. 'CYRILLIC', 'IAST'. Not case-sensitive outputFormat -- the name of the character block or transliteration scheme that the text is to be transliterated TO, e.g. 'CYRILLIC', 'IAST'. Not case-sensitive
377,469
def get_commits(self, repo, organization=):
    path = ( + organization + + repo.name + )
    is_only_today = False
    if not os.path.exists(path):
        all_commits = repo.iter_commits()
        is_only_today = True
    else:
        files = os.listdir(path)
        date = str(files[-1][:-5])
        if date == str(datetime.date.today()):
            if len(files) > 2:
                date = str(files[-2][:-5])
            else:
                all_commits = repo.iter_commits()
                is_only_today = True
    if not is_only_today:
        all_commits = repo.iter_commits(since=date)
    for commit in all_commits:
        self.commits_json[repo.name].append(commit.to_json())
    count = 0
    for commit in repo.iter_commits():
        count += 1
    return count
Retrieves the number of commits to a repo in the organization. If it is the first time getting commits for a repo, it will get all commits and save them to JSON. If there are previous commits saved, it will only get commits that have not been saved to disk since the last date of commits.
377,470
def get(self, request, path):
    if path == :
        config = {
            : request.build_absolute_uri(),
            : self.meteor_settings.get(, {}),
            : request.build_absolute_uri(
                % (
                    self.runtime_config.get(, ),
                ),
            ),
            : ,
        }
        if config[].startswith() \
                and settings.SECURE_SSL_REDIRECT:
            config[] = % (
                config[].split(, 1)[1],
            )
        config.update(self.runtime_config)
        return HttpResponse(
            % dumps(config),
            content_type=,
        )
    try:
        file_path, content_type = self.url_map[path]
        with open(file_path, ) as content:
            return HttpResponse(
                content.read(),
                content_type=content_type,
            )
    except KeyError:
        return HttpResponse(self.html)
Return HTML (or other related content) for Meteor.
377,471
def send_to_azure(instance, data, thread_number, sub_commit, table_info,
                  nb_threads):
    rows = data["rows"]
    if not rows:
        return 0
    columns_name = data["columns_name"]
    table_name = data["table_name"] + "_" + str(thread_number)
    print(C.HEADER + "Create table %s..." % table_name + C.ENDC)
    create_table_from_info(instance, table_info, table_name)
    print(C.OKGREEN + "Create table %s...OK" % table_name + C.ENDC)
    small_batch_size = int(2099 / len(columns_name))
    cnxn = connect(instance)
    cursor = cnxn.cursor()
    boolean = True
    total_rows = len(rows)
    question_mark_pattern = "(%s)" % ",".join(["?" for i in range(len(rows[0]))])
    counter = 0
    while boolean:
        temp_row = []
        question_mark_list = []
        for i in range(small_batch_size):
            if rows:
                temp_row.append(rows.pop())
                question_mark_list.append(question_mark_pattern)
            else:
                boolean = False
                continue
        counter = counter + len(temp_row)
        threads_state = eval(read_file("threads_state_%s" % str(thread_number)))
        threads_state["iteration"] = counter
        write_in_file("threads_state_%s" % str(thread_number),
                      str(threads_state))
        if sub_commit:
            suffix = "rows sent"
        else:
            suffix = "rows prepared to be sent"
        print_progress_bar_multi_threads(nb_threads, suffix=suffix)
        data_values_str = ','.join(question_mark_list)
        columns_name_str = ", ".join(columns_name)
        inserting_request = 'INSERT INTO %s (%s) VALUES %s;' % (
            table_name, columns_name_str, data_values_str)
        final_data = [y for x in temp_row for y in x]
        if final_data:
            cursor.execute(inserting_request, final_data)
        if sub_commit:
            commit_function(cnxn)
    if not sub_commit:
        commit_function(cnxn)
    cursor.close()
    cnxn.close()
    return 0
data = {
    "table_name": 'name_of_the_azure_schema' + '.' + 'name_of_the_azure_table',  # Must already exist
    "columns_name": [first_column_name, second_column_name, ..., last_column_name],
    "rows": [[first_raw_value, second_raw_value, ..., last_raw_value], ...],
}
377,472
def get_function(self, name):
    p = ffi.lib.LLVMPY_GetNamedFunction(self, _encode_string(name))
    if not p:
        raise NameError(name)
    return ValueRef(p, 'function', dict(module=self))
Get a ValueRef pointing to the function named *name*. NameError is raised if the symbol isn't found.
377,473
def handle_ChannelClose(self, frame):
    self.sender.send_CloseOK()
    exc = exceptions._get_exception_type(frame.payload.reply_code)
    self._close_all(exc)
AMQP server closed the channel with an error
377,474
def factory(ec, code=None, token=None, refresh=None, **kwargs):
    TTYPE = {'code': 'T', 'token': 'T', 'refresh': 'R'}
    args = {}
    if code:
        args['code'] = init_token_handler(ec, code, TTYPE['code'])
    if token:
        args['token'] = init_token_handler(ec, token, TTYPE['token'])
    if refresh:
        args['refresh'] = init_token_handler(ec, token, TTYPE['refresh'])
    return TokenHandler(**args)
Create a token handler :param code: :param token: :param refresh: :return: TokenHandler instance
377,475
def __get_untitled_file_name(self):
    untitledNameId = Editor._Editor__untitled_name_id
    for file in self.list_files():
        if not os.path.dirname(file) == self.__default_session_directory:
            continue
        search = re.search(r"\d+", os.path.basename(file))
        if not search:
            continue
        untitledNameId = max(int(search.group(0)), untitledNameId) + 1
    name = "{0} {1}.{2}".format(self.__default_file_name,
                                untitledNameId,
                                self.__default_file_extension)
    Editor._Editor__untitled_name_id += 1
    LOGGER.debug("> Next untitled file name: '{0}'.".format(name))
    return name
Returns an untitled file name. :return: Untitled file name. :rtype: unicode
377,476
def create_dataset(self, name, **kwargs):
    return self._write_op(self._create_dataset_nosync, name, **kwargs)
Create an array. Parameters ---------- name : string Array name. data : array_like, optional Initial data. shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. If not provided, will be guessed from `shape` and `dtype`. dtype : string or dtype, optional NumPy dtype. compressor : Codec, optional Primary compressor. fill_value : object Default value to use for uninitialized portions of the array. order : {'C', 'F'}, optional Memory layout to be used within each chunk. synchronizer : zarr.sync.ArraySynchronizer, optional Array synchronizer. filters : sequence of Codecs, optional Sequence of filters to use to encode chunk data prior to compression. overwrite : bool, optional If True, replace any existing array or group with the given name. cache_metadata : bool, optional If True, array configuration metadata will be cached for the lifetime of the object. If False, array metadata will be reloaded prior to all data access and modification operations (may incur overhead depending on storage and data access pattern). Returns ------- a : zarr.core.Array Examples -------- >>> import zarr >>> g1 = zarr.group() >>> d1 = g1.create_dataset('foo', shape=(10000, 10000), ... chunks=(1000, 1000)) >>> d1 <zarr.core.Array '/foo' (10000, 10000) float64> >>> d2 = g1.create_dataset('bar/baz/qux', shape=(100, 100, 100), ... chunks=(100, 10, 10)) >>> d2 <zarr.core.Array '/bar/baz/qux' (100, 100, 100) float64>
377,477
def _verify(function):
    def wrapped(pin, *args, **kwargs):
        pin = int(pin)
        if pin not in _open:
            ppath = gpiopath(pin)
            if not os.path.exists(ppath):
                log.debug("Creating Pin {0}".format(pin))
                with _export_lock:
                    with open(pjoin(gpio_root, 'export'), 'w') as f:
                        _write(f, pin)
            value = open(pjoin(ppath, 'value'), FMODE)
            direction = open(pjoin(ppath, 'direction'), FMODE)
            _open[pin] = PinState(value=value, direction=direction)
        return function(pin, *args, **kwargs)
    return wrapped
decorator to ensure pin is properly set up
377,478
def get_type(self):
    if self.type_idx_value is None:
        self.type_idx_value = self.CM.get_type(self.type_idx)
    return self.type_idx_value
Return the type of the field :rtype: string
377,479
async def issue_cmd(self, cmd, value, retry=3):
    # NOTE: the command format string and the response-matching regexes were
    # stripped from this record; the patterns below are reconstructions based
    # on the OpenTherm Gateway serial protocol and may differ from the source.
    async with self._cmd_lock:
        if not self.connected:
            _LOGGER.debug("Serial transport closed, not sending command %s", cmd)
            return
        while not self._cmdq.empty():
            _LOGGER.debug("Clearing leftover message from command queue: %s", await self._cmdq.get())
        _LOGGER.debug("Sending command: %s with value %s", cmd, value)
        self.transport.write("{}={}\r\n".format(cmd, value).encode())
        if cmd == OTGW_CMD_REPORT:
            expect = r"^{}:\s*([A-Z]{{2}}|{}=.+)$".format(cmd, value)
        else:
            expect = r"^{}:\s*(.+)$".format(cmd)

        async def send_again(err):
            nonlocal retry
            _LOGGER.warning("Command %s failed with %s, retrying...", cmd, err)
            retry -= 1
            self.transport.write("{}={}\r\n".format(cmd, value).encode())

        async def process(msg):
            _LOGGER.debug("Got possible response for command %s: %s", cmd, msg)
            if msg in OTGW_ERRS:
                if retry == 0:
                    raise OTGW_ERRS[msg]
                await send_again(msg)
                return
            if cmd == OTGW_CMD_MODE and value == 'R':
                # reset: wait for the gateway's startup banner
                while not re.match(r"OpenTherm Gateway \d+(\.\d+)*", msg):
                    msg = await self._cmdq.get()
                return True
            match = re.match(expect, msg)
            if match:
                if match.group(1) in OTGW_ERRS:
                    if retry == 0:
                        raise OTGW_ERRS[match.group(1)]
                    await send_again(msg)
                    return
                ret = match.group(1)
                if cmd == OTGW_CMD_SUMMARY and ret == '1':
                    # summary mode returns a second line with the report
                    part2 = await self._cmdq.get()
                    ret = [ret, part2]
                return ret
            if re.match(r"Error 0[1-4]", msg):
                _LOGGER.warning("Received %s. If this happens during a reset of the gateway it can be safely ignored.", msg)
                return
            _LOGGER.warning("Unknown message in command queue: %s", msg)
            await send_again(msg)

        while True:
            msg = await self._cmdq.get()
            ret = await process(msg)
            if ret is not None:
                return ret
Issue a command, then await and return the return value. This method is a coroutine.
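A hedged usage sketch (the command constant and the value format are assumptions about the OpenTherm Gateway protocol):

ret = await protocol.issue_cmd(OTGW_CMD_TARGET_TEMP, 19.5)   # e.g. sends 'TT=19.5'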
377,480
def respond(self, text, sessionID="general"):
    text = self.__normalize(text)
    previousText = self.__normalize(self.conversation[sessionID][-2])
    text_correction = self.__correction(text)
    current_topic = self.topic[sessionID]
    current_topic_order = current_topic.split(".")
    # walk up the topic hierarchy ("a.b.c" -> "a.b" -> "a") until a match is found
    while current_topic_order:
        try:
            return self.__response_on_topic(text, previousText, text_correction, current_topic, sessionID)
        except ValueError:
            pass
        current_topic_order.pop()
        current_topic = ".".join(current_topic_order)
    try:
        return self.__response_on_topic(text, previousText, text_correction, current_topic, sessionID)
    except ValueError:
        return "Sorry I couldn't find anything relevant"
Generate a response to the user input. :type text: str :param text: The string to be mapped :rtype: str
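A hedged usage sketch (the chatbot class and template names are assumptions; each session keeps its own topic and conversation history):

bot = Chat(template_file)                        # 'Chat'/'template_file' assumed
print(bot.respond('Hello!', sessionID='user-42'))
print(bot.respond('tell me more'))               # defaults to the 'general' session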
377,481
def read_igor_D_gene_parameters(params_file_name):
    params_file = open(params_file_name, 'r')
    D_gene_info = {}
    in_D_gene_sec = False
    for line in params_file:
        # the section header and separators were stripped from this record;
        # they are restored from the IGOR parameter-file format
        if line.startswith('#GeneChoice;D_gene;'):
            in_D_gene_sec = True
        elif in_D_gene_sec:
            if line[0] == '%':
                split_line = line[1:].split(';')
                D_gene_info[split_line[0]] = [split_line[1], int(split_line[2])]
            else:
                break
    params_file.close()
    genD = [[]] * len(D_gene_info.keys())
    for D_gene in D_gene_info.keys():
        genD[D_gene_info[D_gene][1]] = [D_gene, D_gene_info[D_gene][0]]
    return genD
Load genD from file. genD is a list of genomic D information. Each element is a list of the name of the D allele and the germline sequence. Parameters ---------- params_file_name : str File name for a IGOR parameter file. Returns ------- genD : list List of genomic D information.
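For orientation, a hedged sketch of the IGOR section this parser walks (the exact header text is an assumption; the ';'-separated, '%'-prefixed event lines carry name, germline sequence, and index):

#GeneChoice;D_gene;Undefined_side;...
%TRBD1*01;GGGACAGGGGGC;0
%TRBD2*01;GGGACTAGCGGGGGG;1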
377,482
def _scaled_int(s):
    s = bytearray(s)
    sign = 1 - ((s[0] & 0x80) >> 6)        # high bit set -> sign == -1
    int_val = ((s[0] & 0x7f) << 16) | (s[1] << 8) | s[2]
    # the debug format string was stripped from this record; this is a stand-in
    log.debug('bytes: %s int: %d sign: %d', ' '.join(hex(c) for c in s), int_val, sign)
    return (sign * int_val) / 10000.
r"""Convert a 3 byte string to a signed integer value.
377,483
def emit_java_headers(target, source, env):
    # construction-variable names and defaults were stripped from this record;
    # 'JAVACLASSSUFFIX'/'JAVACLASSDIR' and the '.class'/'.h' suffixes are the
    # standard SCons values
    class_suffix = env.get('JAVACLASSSUFFIX', '.class')
    classdir = env.get('JAVACLASSDIR')
    if not classdir:
        try:
            s = source[0]
        except IndexError:
            classdir = '.'
        else:
            try:
                classdir = s.attributes.java_classdir
            except AttributeError:
                classdir = '.'
    classdir = env.Dir(classdir).rdir()
    if str(classdir) == '.':
        c_ = None
    else:
        c_ = str(classdir) + os.sep
    slist = []
    for src in source:
        try:
            classname = src.attributes.java_classname
        except AttributeError:
            classname = str(src)
            if c_ and classname[:len(c_)] == c_:
                classname = classname[len(c_):]
            if class_suffix and classname[-len(class_suffix):] == class_suffix:
                classname = classname[:-len(class_suffix)]
            classname = SCons.Tool.javac.classname(classname)
        s = src.rfile()
        s.attributes.java_classname = classname
        slist.append(s)
    s = source[0].rfile()
    if not hasattr(s.attributes, 'java_classdir'):
        s.attributes.java_classdir = classdir
    if target[0].__class__ is SCons.Node.FS.File:
        tlist = target
    else:
        if not isinstance(target[0], SCons.Node.FS.Dir):
            target[0].__class__ = SCons.Node.FS.Dir
            target[0]._morph()
        tlist = []
        for s in source:
            fname = s.attributes.java_classname.replace('.', '_') + '.h'
            t = target[0].File(fname)
            t.attributes.java_lookupdir = target[0]
            tlist.append(t)
    return tlist, source
Create and return lists of Java stub header files that will be created from a set of class files.
377,484
def use_astropy_helpers(**kwargs):
    global BOOTSTRAPPER
    config = BOOTSTRAPPER.config
    config.update(**kwargs)
    BOOTSTRAPPER = _Bootstrapper(**config)
    BOOTSTRAPPER.run()
Ensure that the `astropy_helpers` module is available and is importable. This supports automatic submodule initialization if astropy_helpers is included in a project as a git submodule, or will download it from PyPI if necessary. Parameters ---------- path : str or None, optional A filesystem path relative to the root of the project's source code that should be added to `sys.path` so that `astropy_helpers` can be imported from that path. If the path is a git submodule it will automatically be initialized and/or updated. The path may also be to a ``.tar.gz`` archive of the astropy_helpers source distribution. In this case the archive is automatically unpacked and made temporarily available on `sys.path` as a ``.egg`` archive. If `None` skip straight to downloading. download_if_needed : bool, optional If the provided filesystem path is not found an attempt will be made to download astropy_helpers from PyPI. It will then be made temporarily available on `sys.path` as a ``.egg`` archive (using the ``setup_requires`` feature of setuptools). If the ``--offline`` option is given at the command line the value of this argument is overridden to `False`. index_url : str, optional If provided, use a different URL for the Python package index than the main PyPI server. use_git : bool, optional If `False` no git commands will be used--this effectively disables support for git submodules. If the ``--no-git`` option is given at the command line the value of this argument is overridden to `False`. auto_upgrade : bool, optional By default, when installing a package from a non-development source distribution ah_bootstrap will try to automatically check for patch releases to astropy-helpers on PyPI and use the patched version over any bundled versions. Setting this to `False` will disable that functionality. If the ``--offline`` option is given at the command line the value of this argument is overridden to `False`. offline : bool, optional If `True`, disable all actions that require an internet connection, including downloading packages from the package index and fetching updates to any git submodule. Defaults to `False`.
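A hedged sketch of the conventional call site, at the top of a project's setup.py (the path and the downstream import are illustrative):

from ah_bootstrap import use_astropy_helpers
use_astropy_helpers(path='astropy_helpers', download_if_needed=True)

from astropy_helpers.setup_helpers import register_commands  # importable afterwards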
377,485
def compare_values(values0, values1):
    values0 = {v[0]: v[1:] for v in values0}
    values1 = {v[0]: v[1:] for v in values1}
    created = [(k, v[0], v[1]) for k, v in values1.items() if k not in values0]
    deleted = [(k, v[0], v[1]) for k, v in values0.items() if k not in values1]
    # note: a key missing from values1 also compares unequal, so deleted keys
    # show up in `modified` too, and `modified` reports the old values
    modified = [(k, v[0], v[1]) for k, v in values0.items() if v != values1.get(k, None)]
    return created, deleted, modified
Compares all the values of a single registry key.
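A worked example under the (name, type, data) value-tuple layout implied above; note that the old values are what get reported:

old = [('Version', 'REG_SZ', '1.0'), ('Path', 'REG_SZ', 'C:\\a')]
new = [('Version', 'REG_SZ', '2.0'), ('Name', 'REG_SZ', 'x')]
created, deleted, modified = compare_values(old, new)
# created  -> [('Name', 'REG_SZ', 'x')]
# deleted  -> [('Path', 'REG_SZ', 'C:\\a')]
# modified -> [('Version', 'REG_SZ', '1.0'), ('Path', 'REG_SZ', 'C:\\a')]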
377,486
def calculate_mvgd_stats(nw): omega = 2 * pi * 50 nw.control_circuit_breakers(mode=) trafos_dict = {} generators_dict = {} branches_dict = {} ring_dict = {} LA_dict = {} other_nodes_dict = {} lv_branches_dict = {} trafos_idx = 0 gen_idx = 0 branch_idx = 0 ring_idx = 0 LA_idx = 0 lv_branches_idx = 0 for district in nw.mv_grid_districts(): root = district.mv_grid.station() mv_impedances = {} mvlv_impedances = {} mv_path_lengths = {} mvlv_path_lengths = {} mv_thermal_limits = {} lv_thermal_limits = {} mvlv_thermal_limits = {} n_outgoing_LV = 0 n_stations_LV = 0 n_outgoing_MV = 0 G = district.mv_grid._graph for node in G.nodes(): if isinstance(node, MVStationDing0): n_outgoing_MV += len(list(G.neighbors(node))) continue mv_impedance = 0 mv_path_length = 0 if not isinstance(node, MVCableDistributorDing0) and not isinstance(node, CircuitBreakerDing0): if not nx.has_path(G, root, node): continue else: path = nx.shortest_path(G, root, node) for i in range(len(path) - 1): mv_impedance += np.sqrt( (G.adj[path[i]][path[i + 1]][].type[ ] * 1e-3 * omega * \ G.adj[path[i]][path[i + 1]][ ].length) ** 2. + \ (G.adj[path[i]][path[i + 1]][].type[ ] * \ G.adj[path[i]][path[i + 1]][ ].length) ** 2.) mv_path_length += G.adj[path[i]][path[i + 1]][ ].length mv_impedances[node] = mv_impedance mv_path_lengths[node] = mv_path_length mv_thermal_limit = G.adj[path[0]][path[1]][].type[] mv_thermal_limits[node] = mv_thermal_limit if isinstance(node, LVStationDing0): lvstation_impedance = 0. for trafo in node.transformers(): lvstation_impedance += 1. / np.hypot(trafo.r,trafo.x) if lvstation_impedance > 0.: lvstation_impedance = 1. / lvstation_impedance else: lvstation_impedance = 0. for lv_LA in district.lv_load_areas(): for lv_dist in lv_LA.lv_grid_districts(): if lv_dist.lv_grid._station == node: G_lv = lv_dist.lv_grid._graph for lv_node in G_lv.nodes(): if isinstance(lv_node, GeneratorDing0) or isinstance(lv_node, LVLoadDing0): path = nx.shortest_path(G_lv, node, lv_node) lv_impedance = lvstation_impedance lv_path_length = 0. for i in range(len(path)-1): lv_impedance += np.sqrt((G_lv.adj[path[i]][path[i+1]][].type[] * 1e-3 * omega * \ G_lv.adj[path[i]][path[i+1]][].length)**2. + \ (G_lv.adj[path[i]][path[i+1]][].type[] * \ G_lv.adj[path[i]][path[i+1]][].length)**2.) lv_path_length += G_lv.adj[path[i]][path[i+1]][].length lv_thermal_limit = G_lv.adj[path[0]][path[1]][].type[] mvlv_impedances[lv_node] = mv_impedance + lv_impedance mvlv_path_lengths[lv_node] = mv_path_length + lv_path_length lv_thermal_limits[lv_node] = lv_thermal_limit mvlv_thermal_limits[lv_node] = mv_thermal_limit elif isinstance(lv_node, LVStationDing0): n_outgoing_LV += len(list(G_lv.neighbors(lv_node))) n_stations_LV += 1 sum_impedances = 0. sum_thermal_limits = 0. sum_path_lengths = 0. n_terminal_nodes_MV = 0 for terminal_node in mv_impedances.keys(): if not isinstance(terminal_node, LVStationDing0) and not isinstance(terminal_node, MVStationDing0): sum_impedances += mv_impedances[terminal_node] sum_thermal_limits += mv_thermal_limits[terminal_node] sum_path_lengths += mv_path_lengths[terminal_node] n_terminal_nodes_MV += 1 sum_thermal_limits_LV = 0. 
n_terminal_nodes_LV = 0 for terminal_node in mvlv_impedances.keys(): sum_impedances += mvlv_impedances[terminal_node] sum_thermal_limits += mvlv_thermal_limits[terminal_node] sum_thermal_limits_LV += lv_thermal_limits[terminal_node] sum_path_lengths += mvlv_path_lengths[terminal_node] n_terminal_nodes_LV += 1 n_terminal_nodes = n_terminal_nodes_MV + n_terminal_nodes_LV if n_terminal_nodes < 1: mean_impedance = np.nan mean_thermal_limit = np.nan mean_path_length = np.nan else: mean_impedance = sum_impedances / n_terminal_nodes mean_thermal_limit = sum_thermal_limits / n_terminal_nodes mean_path_length = sum_path_lengths / n_terminal_nodes if n_terminal_nodes_LV < 1: mean_thermal_limit_LV = np.nan else: mean_thermal_limit_LV = sum_thermal_limits_LV / n_terminal_nodes_LV number_outgoing_LV = n_outgoing_LV number_outgoing_MV = n_outgoing_MV max_mv_path = 0 max_mvlv_path = 0 nodes_in_rings = [] branches_in_rings = [] for ring in district.mv_grid.rings_full_data(): ring_idx += 1 ring_gen = 0 for node in ring[2]: nodes_in_rings.append(node) if isinstance(node, GeneratorDing0): ring_gen += node.capacity ring_length = 0 for branch in ring[1]: branches_in_rings.append(branch) ring_length += branch.length / 1e3 ring_dict[ring_idx] = { : district.mv_grid.id_db, : ring_length, : ring_gen, } for trafo in district.mv_grid.station().transformers(): trafos_idx += 1 trafos_dict[trafos_idx] = { : district.mv_grid.id_db, : trafo.s_max_a} cd_count = 0 LVs_count = 0 cb_count = 0 lv_trafo_count = 0 lv_trafo_cap = 0 for node in district.mv_grid._graph.nodes(): mv_path_length = 0 mvlv_path_length = 0 if isinstance(node, GeneratorDing0): gen_idx += 1 isolation = not node in nodes_in_rings subtype = node.subtype if subtype == None: subtype = generators_dict[gen_idx] = { : district.mv_grid.id_db, : node.type, : node.type + + subtype, : node.capacity, : node.v_level, : isolation, } mv_path_length = district.mv_grid.graph_path_length( node_source=root, node_target=node) elif isinstance(node, MVCableDistributorDing0): cd_count += 1 elif isinstance(node, LVStationDing0): LVs_count += 1 lv_trafo_count += len([trafo for trafo in node.transformers()]) lv_trafo_cap += np.sum([trafo.s_max_a for trafo in node.transformers()]) if not node.lv_load_area.is_aggregated: mv_path_length = district.mv_grid.graph_path_length( node_source=root, node_target=node) max_lv_path = 0 for lv_LA in district.lv_load_areas(): for lv_dist in lv_LA.lv_grid_districts(): if lv_dist.lv_grid._station == node: for lv_node in lv_dist.lv_grid._graph.nodes(): lv_path_length = lv_dist.lv_grid.graph_path_length( node_source=node, node_target=lv_node) max_lv_path = max(max_lv_path, lv_path_length) mvlv_path_length = mv_path_length + max_lv_path elif isinstance(node, CircuitBreakerDing0): cb_count += 1 max_mv_path = max(max_mv_path, mv_path_length / 1000) max_mvlv_path = max(max_mvlv_path, mvlv_path_length / 1000) other_nodes_dict[district.mv_grid.id_db] = { : cd_count, : LVs_count, : cb_count, : lv_trafo_count, : lv_trafo_cap, : max_mv_path, : max_mvlv_path, : mean_impedance, : mean_thermal_limit, : mean_thermal_limit_LV, : mean_path_length / 1.e3, : number_outgoing_LV, : number_outgoing_MV } for branch in district.mv_grid.graph_edges(): branch_idx += 1 br_in_ring = branch[] in branches_in_rings branches_dict[branch_idx] = { : district.mv_grid.id_db, : branch[].length / 1e3, : branch[].type[], : branch[].kind, : br_in_ring, } for LA in district.lv_load_areas(): LA_idx += 1 LA_dict[LA_idx] = { : district.mv_grid.id_db, : LA.is_aggregated, : LA.is_satellite, } 
LA_pop = 0 residential_peak_load = 0 retail_peak_load = 0 industrial_peak_load = 0 agricultural_peak_load = 0 lv_gen_level_6 = 0 lv_gen_level_7 = 0 for lv_district in LA.lv_grid_districts(): LA_pop = + lv_district.population residential_peak_load += lv_district.peak_load_residential retail_peak_load += lv_district.peak_load_retail industrial_peak_load += lv_district.peak_load_industrial agricultural_peak_load += lv_district.peak_load_agricultural for g in lv_district.lv_grid.generators(): if g.v_level == 6: lv_gen_level_6 += g.capacity elif g.v_level == 7: lv_gen_level_7 += g.capacity for br in lv_district.lv_grid.graph_edges(): lv_branches_idx += 1 lv_branches_dict[lv_branches_idx] = { : district.mv_grid.id_db, : br[].length / 1e3, : br[].type.to_frame().columns[0], : br[].kind, } LA_dict[LA_idx].update({ : LA_pop, : residential_peak_load, : retail_peak_load, : industrial_peak_load, : agricultural_peak_load, : residential_peak_load + retail_peak_load + \ industrial_peak_load + agricultural_peak_load, : lv_gen_level_6 + lv_gen_level_7, : lv_gen_level_6, : lv_gen_level_7, }) proj = partial( pyproj.transform, pyproj.Proj(init=), pyproj.Proj(init=)) district_geo = transform(proj, district.geo_data) other_nodes_dict[district.mv_grid.id_db].update({: district_geo.area}) mvgd_stats = pd.DataFrame.from_dict({}, orient=) trafos_df = pd.DataFrame.from_dict(trafos_dict, orient=) generators_df = pd.DataFrame.from_dict(generators_dict, orient=) other_nodes_df = pd.DataFrame.from_dict(other_nodes_dict, orient=) branches_df = pd.DataFrame.from_dict(branches_dict, orient=) lv_branches_df = pd.DataFrame.from_dict(lv_branches_dict, orient=) ring_df = pd.DataFrame.from_dict(ring_dict, orient=) LA_df = pd.DataFrame.from_dict(LA_dict, orient=) if not trafos_df.empty: mvgd_stats = pd.concat([mvgd_stats, trafos_df.groupby().count()[]], axis=1) mvgd_stats = pd.concat([mvgd_stats, trafos_df.groupby().sum()[[]]], axis=1) mvgd_stats.columns = [, ] if not generators_df.empty: mv_generation = generators_df.groupby([, ])[].sum().to_frame().unstack(level=-1) mv_generation.columns = [ + _[1] if isinstance(_, tuple) else _ for _ in mv_generation.columns] mvgd_stats = pd.concat([mvgd_stats, mv_generation], axis=1) mv_generation = generators_df.groupby( [, ])[].sum().to_frame().unstack(level=-1) mv_generation.columns = [ + str(_[1]) if isinstance(_, tuple) else _ for _ in mv_generation.columns] mvgd_stats = pd.concat([mvgd_stats, mv_generation], axis=1) mv_generation = generators_df[generators_df[]].groupby( [])[].count().to_frame() mv_generation.columns = [] mvgd_stats = pd.concat([mvgd_stats, mv_generation], axis=1) if not other_nodes_df.empty: mvgd_stats[] = other_nodes_df[].to_frame().astype(int) mvgd_stats[] = other_nodes_df[].to_frame().astype(int) mvgd_stats[] = other_nodes_df[].to_frame().astype(int) mvgd_stats[] = other_nodes_df[].to_frame() mvgd_stats[] = other_nodes_df[].to_frame().astype(int) mvgd_stats[] = other_nodes_df[].to_frame() mvgd_stats[] = other_nodes_df[].to_frame() mvgd_stats[] = other_nodes_df[].to_frame() mvgd_stats[] = \ other_nodes_df[].to_frame() mvgd_stats[] = \ other_nodes_df[].to_frame() mvgd_stats[] = \ other_nodes_df[].to_frame() mvgd_stats[] = \ other_nodes_df[].to_frame() mvgd_stats[] = \ other_nodes_df[].to_frame() mvgd_stats[] = \ other_nodes_df[].to_frame() if not branches_df.empty: branches_data = branches_df[branches_df[] == ].groupby( [])[].sum().to_frame() branches_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, branches_data], axis=1) branches_data = 
branches_df[branches_df[] == ].groupby( [])[].sum().to_frame() branches_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, branches_data], axis=1) branches_data = branches_df.groupby( [, ])[].sum().to_frame().unstack(level=-1) branches_data.columns = [ + _[1] if isinstance(_, tuple) else _ for _ in branches_data.columns] mvgd_stats = pd.concat([mvgd_stats, branches_data], axis=1) total_br = branches_df.groupby([])[].count().to_frame() ring_br = branches_df[branches_df[]].groupby( [])[].count().to_frame() branches_data = total_br - ring_br total_br.columns = [] mvgd_stats = pd.concat([mvgd_stats, total_br], axis=1) branches_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, branches_data], axis=1) if not lv_branches_df.empty: lv_branches_data = lv_branches_df[lv_branches_df[] == ].groupby( [])[].sum().to_frame() lv_branches_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, lv_branches_data], axis=1) lv_branches_data = lv_branches_df[lv_branches_df[] == ].groupby( [])[].sum().to_frame() lv_branches_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, lv_branches_data], axis=1) lv_branches_data = lv_branches_df.groupby( [, ])[].sum().to_frame().unstack(level=-1) lv_branches_data.columns = [ + _[1] if isinstance(_, tuple) else _ for _ in lv_branches_data.columns] mvgd_stats = pd.concat([mvgd_stats, lv_branches_data], axis=1) total_lv_br = lv_branches_df.groupby([])[].count().to_frame() total_lv_br.columns = [] mvgd_stats = pd.concat([mvgd_stats, total_lv_br], axis=1) if not ring_df.empty: ring_data = ring_df.groupby([])[].count().to_frame() ring_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1) ring_data = ring_df.groupby([])[].min().to_frame() ring_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1) ring_data = ring_df.groupby([])[].max().to_frame() ring_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1) ring_data = ring_df.groupby([])[].mean().to_frame() ring_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1) ring_data = ring_df.groupby([])[].sum().to_frame() ring_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1) non_ring_data = branches_df.groupby([])[].sum().to_frame() non_ring_data.columns = [] ring_data = non_ring_data - ring_data ring_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, ring_data.round(1).abs()], axis=1) ring_data = ring_df.groupby([])[].sum().to_frame() ring_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, ring_data], axis=1) if not LA_df.empty: LA_data = LA_df.groupby([])[].count().to_frame() LA_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, LA_data], axis=1) LA_data = LA_df.groupby([])[, , , , , , , , ].sum() LA_data.columns = [, , , , , , , , , ] mvgd_stats = pd.concat([mvgd_stats, LA_data], axis=1) if not LA_df.empty: agg_LA_data = LA_df[LA_df[]].groupby( [])[].count().to_frame() agg_LA_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, agg_LA_data], axis=1) sat_LA_data = LA_df[LA_df[]].groupby( [])[].count().to_frame() sat_LA_data.columns = [] mvgd_stats = pd.concat([mvgd_stats, sat_LA_data], axis=1) agg_LA_data = LA_df[LA_df[]].groupby([])[, , ].sum() agg_LA_data.columns = [, , ] mvgd_stats = pd.concat([mvgd_stats, agg_LA_data], axis=1) mvgd_stats = mvgd_stats.fillna(0) mvgd_stats = mvgd_stats[sorted(mvgd_stats.columns.tolist())] return mvgd_stats
MV Statistics for an arbitrary network Parameters ---------- nw: :any:`list` of NetworkDing0 The MV grid(s) to be studied Returns ------- mvgd_stats : pandas.DataFrame Dataframe containing several statistical numbers about the MVGD
377,487
def extract_atoms(molecule):
    if molecule == '':
        return molecule
    try:
        return float(molecule)
    except BaseException:
        pass
    atoms = ''
    if not molecule[0].isalpha():
        # leading characters form a (possibly negative) numeric prefactor
        i = 0
        while not molecule[i].isalpha():
            i += 1
        prefactor = float(molecule[:i])
        if prefactor < 0:
            prefactor = abs(prefactor)
            sign = '-'
        else:
            sign = ''
        molecule = molecule[i:]
    else:
        prefactor = 1
        sign = ''
    # expand digits: 'H2' -> 'HH'
    for k in range(len(molecule)):
        if molecule[k].isdigit():
            for j in range(int(molecule[k]) - 1):
                atoms += molecule[k - 1]
        else:
            atoms += molecule[k]
    if prefactor % 1 == 0:
        atoms *= int(prefactor)
    elif prefactor % 1 == 0.5:
        atoms_sort = sorted(atoms)
        N = len(atoms)
        atoms = ''
        for n in range(N):
            for m in range(int(prefactor - 0.5)):
                atoms += atoms_sort[n]
            if n % 2 == 0:
                atoms += atoms_sort[n]
    return sign + ''.join(sorted(atoms))
Return a string with all atoms in molecule
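Worked examples of the expansion and sorting (digits repeat the preceding atom, a leading number is a prefactor, half-integer prefactors take the interleaving branch):

extract_atoms('H2O')      # -> 'HHO'
extract_atoms('2CH4')     # -> 'CCHHHHHHHH'
extract_atoms('-0.5H2')   # -> '-H'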
377,488
def last_insert_id(self, cursor, table_name, pk_name):
    table_name = self.quote_name(table_name)
    cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
    return cursor.fetchone()[0]
Given a cursor object that has just performed an INSERT statement into a table that has an auto-incrementing ID, returns the newly created ID. This method also receives the table name and the name of the primary-key column.
377,489
def is_module_on_std_lib_path(cls, module):
    module_file_real_path = os.path.realpath(module.__file__)
    if module_file_real_path.startswith(cls.STANDARD_LIB_PATH):
        return True
    elif os.path.splitext(module_file_real_path)[1] == '.pyc':
        # for a .pyc, also check the .py file it was compiled from
        # (the '.pyc'/'.py' literals were stripped from this record and are
        # restored from the docstring's description)
        py_file_real_path = os.path.realpath(os.path.splitext(module_file_real_path)[0] + '.py')
        return py_file_real_path.startswith(cls.STANDARD_LIB_PATH)
    return False
Sometimes .py files are symlinked to the real python files, as in the case of a virtual env. However, the .pyc files are created under the virtual env directory rather than under the path in cls.STANDARD_LIB_PATH. Hence this function checks both. :param module: a module :return: True if module is on the interpreter's stdlib path. False otherwise.
377,490
def format_cert_name(env='', account='', region='', certificate=None):
    cert_name = None
    if certificate:
        if certificate.startswith('arn'):
            LOG.info("Full ARN provided...skipping lookup.")
            cert_name = certificate
        else:
            generated_cert_name = generate_custom_cert_name(env, region, account, certificate)
            if generated_cert_name:
                LOG.info("Found generated certificate %s from template", generated_cert_name)
                cert_name = generated_cert_name
            else:
                LOG.info("Using default certificate name logic")
                # the template literal was stripped from this record; the
                # standard IAM server-certificate ARN shape is assumed here
                cert_name = ('arn:aws:iam::{account}:server-certificate/{name}'.format(
                    account=account, name=certificate))
    LOG.debug('Certificate name: %s', cert_name)
    return cert_name
Format the SSL certificate name into ARN for ELB. Args: env (str): Account environment name account (str): Account number for ARN region (str): AWS Region. certificate (str): Name of SSL certificate Returns: str: Fully qualified ARN for SSL certificate None: Certificate is not desired
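A hedged example, assuming the default IAM server-certificate template restored above:

format_cert_name(account='123456789012', certificate='wildcard.example.com')
# -> 'arn:aws:iam::123456789012:server-certificate/wildcard.example.com'
format_cert_name(certificate='arn:aws:acm:us-east-1:123456789012:certificate/abcd')
# -> returned unchanged: full ARNs skip the lookup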
377,491
def _annotate_validations(eval_files, data):
    for key in ["tp", "tp-calls", "fp", "fn"]:
        if eval_files.get(key):
            eval_files[key] = annotation.add_genome_context(eval_files[key], data)
    return eval_files
Add annotations about potential problem regions to validation VCFs.
377,492
def sine(w, A=1, phi=0, offset=0):
    from math import sin

    def f(i):
        return A * sin(w*i + phi) + offset

    return partial(force, sequence=_advance(f))
Return a driver function that can advance a sequence of sine values. .. code-block:: none value = A * sin(w*i + phi) + offset Args: w (float) : a frequency for the sine driver A (float) : an amplitude for the sine driver phi (float) : a phase offset to start the sine driver with offset (float) : a global offset to add to the driver values
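A hedged usage sketch in the Bokeh periodic-callback style this driver targets (the data source and document wiring are assumptions):

from bokeh.io import curdoc
from bokeh.driving import sine

@sine(w=0.1, A=2, offset=1)
def update(value):
    # called periodically; `value` advances along 2*sin(0.1*i) + 1
    source.stream({'y': [value]})   # 'source' is an assumed ColumnDataSource

curdoc().add_periodic_callback(update, 100)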
377,493
def store_text_log_summary_artifact(job, text_log_summary_artifact):
    # dict keys below are restored from context; part of the loop body (creating
    # the TextLogStep rows from the parsed step data) appears to have been lost
    # from this record
    step_data = json.loads(text_log_summary_artifact['blob'])['step_data']
    result_map = {v: k for (k, v) in TextLogStep.RESULTS}
    with transaction.atomic():
        for step in step_data['steps']:
            name = step['name'][:TextLogStep._meta.get_field('name').max_length]
    error_summary.get_error_summary(job)
Store the contents of the text log summary artifact
377,494
def change_attributes(self, bounds, radii, colors):
    self.n_cylinders = len(bounds)
    self.is_empty = True if self.n_cylinders == 0 else False
    if self.is_empty:
        self.bounds = bounds
        self.radii = radii
        self.colors = colors
        return
    # the dtype literals were stripped from this record; 'float32'/'uint8' are
    # the usual choices for OpenGL vertex data and are assumptions here
    self.bounds = np.array(bounds, dtype='float32')
    vertices, directions = self._gen_bounds(self.bounds)
    self.radii = np.array(radii, dtype='float32')
    prim_radii = self._gen_radii(self.radii)
    self.colors = np.array(colors, dtype='uint8')
    prim_colors = self._gen_colors(self.colors)
    # local box coordinates: one unit cube (36 vertices) per cylinder
    local = np.array([
        0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0,
        0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0,
        0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0,
        0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0,
        1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0,
        0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0,
        0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0,
    ]).astype('float32')
    local = np.tile(local, self.n_cylinders)
    self._verts_vbo = VertexBuffer(vertices, GL_DYNAMIC_DRAW)
    self._directions_vbo = VertexBuffer(directions, GL_DYNAMIC_DRAW)
    self._local_vbo = VertexBuffer(local, GL_DYNAMIC_DRAW)
    self._color_vbo = VertexBuffer(prim_colors, GL_DYNAMIC_DRAW)
    self._radii_vbo = VertexBuffer(prim_radii, GL_DYNAMIC_DRAW)
Reinitialize the buffers, to accomodate the new attributes. This is used to change the number of cylinders to be displayed.
377,495
def buffer_read(library, session, count):
    buffer = create_string_buffer(count)
    return_count = ViUInt32()
    ret = library.viBufRead(session, buffer, count, byref(return_count))
    return buffer.raw[:return_count.value], ret
Reads data from device or interface through the use of a formatted I/O read buffer. Corresponds to viBufRead function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param count: Number of bytes to be read. :return: data read, return value of the library call. :rtype: bytes, :class:`pyvisa.constants.StatusCode`
377,496
def strsettings(self, indent=0, maxindent=25, width=0):
    out = []
    # the label template literals were stripped from this record; a plain
    # "name: " label is assumed here
    makelabel = lambda name: ' ' * indent + name + ': '
    settingsindent = _autoindent([makelabel(s) for s in self.options], indent, maxindent)
    for name in self.option_order:
        option = self.options[name]
        label = makelabel(name)
        settingshelp = "%s(%s): %s" % (option.formatname, option.strvalue, option.location)
        wrapped = self._wrap_labelled(label, settingshelp, settingsindent, width)
        out.extend(wrapped)
    return '\n'.join(out)
Return user-friendly help on the program's option settings. indent is the number of spaces preceding the text on each line. The indent of the documentation is dependent on the length of the longest label that is shorter than maxindent. A label longer than maxindent will be printed on its own line. width is the maximum allowed page width; use self.width if 0.
377,497
def get(self, field_paths=None, transaction=None):
    if isinstance(field_paths, six.string_types):
        # the message literal was stripped from this record; restored from context
        raise ValueError("'field_paths' must be a sequence of paths, not a string.")
    if field_paths is not None:
        mask = common_pb2.DocumentMask(field_paths=sorted(field_paths))
    else:
        mask = None
    firestore_api = self._client._firestore_api
    try:
        document_pb = firestore_api.get_document(
            self._document_path,
            mask=mask,
            transaction=_helpers.get_transaction_id(transaction),
            metadata=self._client._rpc_metadata,
        )
    except exceptions.NotFound:
        data = None
        exists = False
        create_time = None
        update_time = None
    else:
        data = _helpers.decode_dict(document_pb.fields, self._client)
        exists = True
        create_time = document_pb.create_time
        update_time = document_pb.update_time
    return DocumentSnapshot(
        reference=self,
        data=data,
        exists=exists,
        read_time=None,
        create_time=create_time,
        update_time=update_time,
    )
Retrieve a snapshot of the current document. See :meth:`~.firestore_v1beta1.client.Client.field_path` for more information on **field paths**. If a ``transaction`` is used and it already has write operations added, this method cannot be used (i.e. read-after-write is not allowed). Args: field_paths (Optional[Iterable[str, ...]]): An iterable of field paths (``.``-delimited list of field names) to use as a projection of document fields in the returned results. If no value is provided, all fields will be returned. transaction (Optional[~.firestore_v1beta1.transaction.\ Transaction]): An existing transaction that this reference will be retrieved in. Returns: ~.firestore_v1beta1.document.DocumentSnapshot: A snapshot of the current document. If the document does not exist at the time of `snapshot`, the snapshot `reference`, `data`, `update_time`, and `create_time` attributes will all be `None` and `exists` will be `False`.
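A hedged usage sketch with the public client API (collection and document names are illustrative):

from google.cloud import firestore

client = firestore.Client()
doc_ref = client.collection('users').document('alice')
snapshot = doc_ref.get(field_paths=['name', 'address.city'])
if snapshot.exists:
    print(snapshot.to_dict())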
377,498
def revise(csp, Xi, Xj, removals):
    "Return true if we remove a value."
    revised = False
    for x in csp.curr_domains[Xi][:]:
        # if no value y in Xj's domain is consistent with Xi=x, prune x
        if every(lambda y: not csp.constraints(Xi, x, Xj, y), csp.curr_domains[Xj]):
            csp.prune(Xi, x, removals)
            revised = True
    return revised
Return true if we remove a value.
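A hedged illustration, assuming the AIMA-style csp module this comes from (`every` is its all()-like helper and `prune` records removals): with the constraint Xi < Xj and current domains Xi: [1, 2, 3], Xj: [1, 2, 3], revising the arc (Xi, Xj) prunes 3 from Xi, since no y in Xj satisfies 3 < y. AC-3 calls this repeatedly until no arc changes.

removals = []
revise(csp, 'Xi', 'Xj', removals)   # -> True; removals now holds ('Xi', 3)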
377,499
def get_eventhub_info(self):
    alt_creds = {
        "username": self._auth_config.get("iot_username"),
        "password": self._auth_config.get("iot_password")}
    mgmt_client = None
    try:
        mgmt_auth = self._create_auth(**alt_creds)
        mgmt_client = uamqp.AMQPClient(self.mgmt_target, auth=mgmt_auth, debug=self.debug)
        mgmt_client.open()
        # the byte-string keys below were stripped from this record; they are
        # restored from the Event Hubs management-request convention and should
        # be read as likely, not certain, values
        mgmt_msg = Message(application_properties={'name': self.eh_name})
        response = mgmt_client.mgmt_request(
            mgmt_msg,
            constants.READ_OPERATION,
            op_type=b'com.microsoft:eventhub',
            status_code_field=b'status-code',
            description_fields=b'status-description')
        eh_info = response.get_data()
        output = {}
        if eh_info:
            output['name'] = eh_info[b'name'].decode('utf-8')
            output['type'] = eh_info[b'type'].decode('utf-8')
            output['created_at'] = datetime.datetime.fromtimestamp(
                float(eh_info[b'created_at']) / 1000)
            output['partition_count'] = eh_info[b'partition_count']
            output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']]
        return output
    finally:
        # guard against _create_auth() failing before mgmt_client is bound,
        # which would otherwise raise NameError here
        if mgmt_client:
            mgmt_client.close()
Get details on the specified EventHub. Keys in the details dictionary include: -'name' -'type' -'created_at' -'partition_count' -'partition_ids' :rtype: dict