Dataset preview, rows 6,300–6,388. Columns: an unnamed integer index (int64, values 0–389k), "code" (string, lengths 26–79.6k characters), and "docstring" (string, lengths 1–46.9k characters).
6,300
def get_facet_serializer(self, *args, **kwargs): assert "objects" in kwargs, "`objects` is a required argument to `get_facet_serializer()`" facet_serializer_class = self.get_facet_serializer_class() kwargs["context"] = self.get_serializer_context() kwargs["context"].update({ "objects": kwargs.pop("objects"), "facet_query_params_text": self.facet_query_params_text, }) return facet_serializer_class(*args, **kwargs)
Return the facet serializer instance that should be used for serializing faceted output.
6,301
def unlink(self, *others): if not len(others): others = self.links[:] for p in self.links[:]: if p in others: p.teardown() return self
Unlink (disassociate) the specified properties object. @param others: The list object to unlink. Unspecified means unlink all. @type others: [L{Properties},..] @return: self @rtype: L{Properties}
6,302
def _element(cls): if not cls.__is_selector(): raise Exception("Invalid selector[%s]." %cls.__control["by"]) driver = Web.driver try: elements = WebDriverWait(driver, cls.__control["timeout"]).until(lambda driver: getattr(driver,"find_elements")(cls.__control["by"], cls.__control["value"])) except: raise Exception("Timeout at %d seconds.Element(%s) not found." %(cls.__control["timeout"],cls.__control["by"])) if len(elements) < cls.__control["index"] + 1: raise Exception("Element [%s]: Element Index Issue! There are [%s] Elements! Index=[%s]" % (cls.__name__, len(elements), cls.__control["index"])) if len(elements) > 1: print("Element [%s]: There are [%d] elements, choosed index=%d" %(cls.__name__,len(elements),cls.__control["index"])) elm = elements[cls.__control["index"]] cls.__control["index"] = 0 return elm
Find the element matching the stored selector controls (by, value, index, timeout).
6,303
def _load_lib(): lib_path = _find_lib_path() lib = ctypes.cdll.LoadLibrary(lib_path[0]) lib.MXGetLastError.restype = ctypes.c_char_p return lib
Load the library by searching the possible paths.
6,304
def countRandomBitFrequencies(numTerms = 100000, percentSparsity = 0.01): counts = SparseMatrix() size = 128*128 counts.resize(1, size) sparseBitmap = SparseMatrix() sparseBitmap.resize(1, size) random.seed(42) numWords=0 for term in xrange(numTerms): bitmap = random.sample(xrange(size), int(size*percentSparsity)) bitmap.sort() sparseBitmap.setRowFromSparse(0, bitmap, [1]*len(bitmap)) counts += sparseBitmap numWords += 1 frequencies = SparseMatrix() frequencies.resize(1, size) frequencies.copy(counts) frequencies.divide(float(numWords)) printFrequencyStatistics(counts, frequencies, numWords, size) frequencyFilename = "bit_frequencies_random.pkl" print "Saving frequency matrix in",frequencyFilename with open(frequencyFilename, "wb") as frequencyPickleFile: pickle.dump(frequencies, frequencyPickleFile) return counts
Create a uniformly random counts matrix through sampling.
6,305
def make_wheelfile_inner(base_name, base_dir='.'):  # default value lost in extraction; '.' assumed
    zip_filename = base_name + ".whl"
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
    deferred = []
    # [The walk over base_dir was lost in extraction. Its surviving fragment
    # shows it appended (score.get(name, 0), path) to `deferred` for
    # .dist-info entries and wrote every other file immediately:
    #         deferred.append((score.get(name, 0), path))
    #     else:
    #         writefile(path, date_time)]
    deferred.sort()
    for score, path in deferred:
        writefile(path, date_time)
    return zip_filename
Create a whl file from all the files under 'base_dir'. Places .dist-info at the end of the archive.
6,306
def _input_as_parameter(self, data):
    # The parameter key was lost in extraction; app controllers typically
    # switch on their input flag here, so '-i' is a guess.
    self.Parameters['-i'].on(data)
Set the input path and log path based on data (a fasta filepath)
6,307
def check_attr(self, repo_abspath, attrs):
    def make_process():
        env = dict(environ, GIT_FLUSH='1')
        cmd = 'git check-attr --stdin -z {0}'.format(' '.join(attrs))
        return Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, cwd=repo_abspath, env=env)

    def read_attrs(process, repo_file_path):
        # Parse the NUL-separated (path, attr, info) triples emitted by
        # `git check-attr -z`.
        process.stdin.write(repo_file_path.encode() + b'\0')
        process.stdin.flush()
        path, attr, info = b'', b'', b''
        nuls_count = 0
        nuls_expected = 3 * len(attrs)
        while nuls_count != nuls_expected:
            b = process.stdout.read(1)
            if b == b'' and process.poll() is not None:
                raise RuntimeError("check-attr exited prematurely")
            elif b == b'\0':
                nuls_count += 1
                if nuls_count % 3 == 0:
                    yield map(self.decode_git_output, (path, attr, info))
                    path, attr, info = b'', b'', b''
            elif nuls_count % 3 == 0:
                path += b
            elif nuls_count % 3 == 1:
                attr += b
            elif nuls_count % 3 == 2:
                info += b

    def read_attrs_old(process, repo_file_path):
        # Fallback for git without -z support: parse line-oriented
        # "<path>: <attr>: <info>" output instead.
        process.stdin.write(repo_file_path.encode() + b'\n')
        process.stdin.flush()
        path, attr, info = b'', b'', b''
        lines_count = 0
        lines_expected = len(attrs)
        while lines_count != lines_expected:
            line = process.stdout.readline()
            info_start = line.rfind(b': ')
            if info_start == -1:
                raise RuntimeError("unexpected output of check-attr: {0}".format(line))
            attr_start = line.rfind(b': ', 0, info_start)
            if attr_start == -1:
                raise RuntimeError("unexpected output of check-attr: {0}".format(line))
            info = line[info_start + 2:len(line) - 1]
            attr = line[attr_start + 2:info_start]
            path = line[:attr_start]
            yield map(self.decode_git_output, (path, attr, info))
            lines_count += 1

    if not attrs:
        return

    process = make_process()

    try:
        while True:
            repo_file_path = yield
            repo_file_attrs = {}

            if self.git_version is None or self.git_version > (1, 8, 5):
                reader = read_attrs
            else:
                reader = read_attrs_old

            for path, attr, value in reader(process, repo_file_path):
                repo_file_attrs[attr] = value

            yield repo_file_attrs
    finally:
        process.stdin.close()
        process.wait()
Generator that returns attributes for given paths relative to repo_abspath. >>> g = GitArchiver.check_attr('repo_path', ['export-ignore']) >>> next(g) >>> attrs = g.send('relative_path') >>> print(attrs['export-ignore']) @param repo_abspath: Absolute path to a git repository. @type repo_abspath: str @param attrs: Attributes to check. @type attrs: [str] @rtype: generator
6,308
def add_signature(name=None, inputs=None, outputs=None): if not name: name = "default" if inputs is None: inputs = {} if outputs is None: outputs = {} if not isinstance(inputs, dict): inputs = {"default": inputs} if not isinstance(outputs, dict): outputs = {"default": outputs} message = find_signature_inputs_from_multivalued_ops(inputs) if message: logging.error(message) message = find_signature_input_colocation_error(name, inputs) if message: raise ValueError(message) saved_model_lib.add_signature(name, inputs, outputs)
Adds a signature to the module definition. NOTE: This must be called within a `module_fn` that is defining a Module. Args: name: Signature name as a string. If omitted, it is interpreted as 'default' and is the signature used when `Module.__call__` `signature` is not specified. inputs: A dict from input name to Tensor or SparseTensor to feed when applying the signature. If a single tensor is passed, it is interpreted as a dict with a single 'default' entry. outputs: A dict from output name to Tensor or SparseTensor to return from applying the signature. If a single tensor is passed, it is interpreted as a dict with a single 'default' entry. Raises: ValueError: if the arguments are invalid.
6,309
def form_valid(self, form):
    # String literals reconstructed; field, message and route names are assumptions.
    valid = True
    name = form.cleaned_data.get('provider').name
    user = self.request.user
    form.save(user=user, service_name=name)
    sa = ServicesActivated.objects.get(name=name)
    if sa.auth_required and sa.self_hosted:
        from django_th.services import default_provider
        default_provider.load_services()
        service_provider = default_provider.get_service(name)
        result = service_provider.check(self.request, user)
        if result is not True:
            form.add_error('provider', result)
            messages.error(self.request, result)
            return redirect('edit_service', pk=self.kwargs.get(self.pk_url_kwarg))
    if valid:
        messages.success(self.request,
                         _('Service %s successfully edited') % name.split('Service')[1])
    return HttpResponseRedirect(reverse('user_services'))
save the data :param form: :return:
6,310
def reset_weights(self): self.input_block.reset_weights() self.policy_backbone.reset_weights() self.value_backbone.reset_weights() self.action_head.reset_weights() self.critic_head.reset_weights()
Properly initialize model weights.
6,311
def averagingData(array, windowSize=None, averagingType='median'):
    # The two averaging types are named in the docstring: "median" or "mean".
    assert averagingType in ['median', 'mean']
    if windowSize is None:
        windowSize = int(len(array) / 50) if int(len(array) / 50) > 100 else 100
    if averagingType == 'median':
        averagedData = runningMedian(array, windowSize)
    elif averagingType == 'mean':
        averagedData = runningMean(array, len(array), windowSize)
    return averagedData
#TODO: docstring :param array: #TODO: docstring :param windowSize: #TODO: docstring :param averagingType: "median" or "mean" :returns: #TODO: docstring
6,312
def _task_to_text(self, task): started = self._format_date(task.get(, None)) completed = self._format_date(task.get(, None)) success = task.get(, None) success_lu = {None: , True: , False: } run_log = task.get(, {}) return .join([ % task.get(, None), % task.get(, None), % success_lu[success], % started, % completed, % run_log.get(, None), % run_log.get(, None), % run_log.get(, None)])
Return a standard formatting of a Task serialization.
6,313
def create_attachment(self, upload_stream, project, wiki_identifier, name, **kwargs):
    # Serializer literals follow the azure-devops codegen pattern; the REST
    # location GUID was lost in extraction.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if wiki_identifier is not None:
        route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
    query_parameters = {}
    if name is not None:
        query_parameters['name'] = self._serialize.query('name', name, 'str')
    if "callback" in kwargs:
        callback = kwargs["callback"]
    else:
        callback = None
    content = self._client.stream_upload(upload_stream, callback=callback)
    response = self._send(http_method='PUT',
                          location_id='...',  # location GUID lost in extraction
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          content=content,
                          media_type='application/octet-stream')
    response_object = models.WikiAttachmentResponse()
    response_object.attachment = self._deserialize('WikiAttachment', response)
    response_object.eTag = response.headers.get('ETag')
    return response_object
CreateAttachment. Creates an attachment in the wiki. :param object upload_stream: Stream to upload :param str project: Project ID or project name :param str wiki_identifier: Wiki Id or name. :param str name: Wiki attachment name. :rtype: :class:`<WikiAttachmentResponse> <azure.devops.v5_0.wiki.models.WikiAttachmentResponse>`
6,314
def get_all_tags(self, filters=None, max_records=None, next_token=None):
    # Key and action literals restored from the boto autoscale DescribeTags API.
    params = {}
    if max_records:
        params['MaxRecords'] = max_records
    if next_token:
        params['NextToken'] = next_token
    return self.get_list('DescribeTags', params, [('member', Tag)])
Lists the Auto Scaling group tags. This action supports pagination by returning a token if there are more pages to retrieve. To get the next page, call this action again with the returned token as the NextToken parameter. :type filters: dict :param filters: The value of the filter type used to identify the tags to be returned. NOT IMPLEMENTED YET. :type max_records: int :param max_records: Maximum number of tags to return. :rtype: list :returns: List of :class:`boto.ec2.autoscale.tag.Tag` instances.
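A minimal pagination sketch for the method above, matching the NextToken flow the docstring describes. It assumes `conn` is a boto autoscale connection exposing `get_all_tags` and that the returned result set carries a `next_token` attribute; `iter_all_tags` is a hypothetical helper.

    # Hypothetical usage: collect every tag by chasing next_token.
    def iter_all_tags(conn, page_size=50):
        tags, token = [], None
        while True:
            page = conn.get_all_tags(max_records=page_size, next_token=token)
            tags.extend(page)
            token = getattr(page, 'next_token', None)  # absent on the last page
            if not token:
                return tags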
6,315
def cmd_unzip(zip_file,
              dest,
              excludes=None,
              options=None,
              template=None,
              runas=None,
              trim_output=False,
              password=None):
    # A stray CLI-example fragment had leaked into this body; string literals
    # below follow the standard salt cmd.run_all / unzip conventions.
    if isinstance(excludes, six.string_types):
        excludes = [x.strip() for x in excludes.split(',')]
    elif isinstance(excludes, (float, six.integer_types)):
        excludes = [six.text_type(excludes)]

    cmd = ['unzip']
    if password:
        cmd.extend(['-P', password])
    if options:
        cmd.extend(shlex.split(options))
    cmd.extend(['{0}'.format(zip_file), '-d', '{0}'.format(dest)])

    if excludes is not None:
        cmd.append('-x')
        cmd.extend(excludes)

    result = __salt__['cmd.run_all'](
        cmd,
        template=template,
        runas=runas,
        python_shell=False,
        redirect_stderr=True,
        output_loglevel='quiet' if password else 'debug')

    if result['retcode'] != 0:
        raise CommandExecutionError(result['stdout'])

    return _trim_files(result['stdout'].splitlines(), trim_output)
.. versionadded:: 2015.5.0
    In versions 2014.7.x and earlier, this function was known as ``archive.unzip``.

Uses the ``unzip`` command to unpack zip files. This command is part of the `Info-ZIP`_ suite of tools, and is typically packaged as simply ``unzip``.

.. _`Info-ZIP`: http://www.info-zip.org/

zip_file
    Path of zip file to be unpacked

dest
    The destination directory into which the file should be unpacked

excludes : None
    Comma-separated list of files not to unpack. Can also be passed in a Python list.

template : None
    Can be set to 'jinja' or another supported template engine to render the command arguments before execution:

    .. code-block:: bash

        salt '*' archive.cmd_unzip template=jinja /tmp/zipfile.zip '/tmp/{{grains.id}}' excludes=file_1,file_2

options
    Optional when using ``zip`` archives, ignored when using other archive files. This is mostly used to overwrite existing files with ``o``. These options are only used when the ``unzip`` binary is used.

    .. versionadded:: 2016.3.1

runas : None
    Unpack the zip file as the specified user. Defaults to the user under which the minion is running.

    .. versionadded:: 2015.5.0

trim_output : False
    The number of files we should output on success before the rest are trimmed. If this is set to True, it will default to 100.

password
    Password to use with password protected zip files

    .. note::
        This is not considered secure. It is recommended to instead use :py:func:`archive.unzip <salt.modules.archive.unzip>` for password-protected ZIP files. If a password is used here, then the unzip command run to extract the ZIP file will not show up in the minion log like most shell commands Salt runs do. However, the password will still be present in the events logged to the minion log at the ``debug`` log level. If the minion is logging at ``debug`` (or more verbose), then be advised that the password will appear in the log.

    .. versionadded:: 2016.11.0

CLI Example:

.. code-block:: bash

    salt '*' archive.cmd_unzip /tmp/zipfile.zip /home/strongbad/ excludes=file_1,file_2
6,316
def get_ga_tracking_id(self): if hasattr(settings, self.ga_tracking_id_settings_key): return getattr(settings, self.ga_tracking_id_settings_key) return super(GARequestErrorReportingMixin, self).get_ga_tracking_id()
Retrieve tracking ID from settings
6,317
def del_key(self, ref):
    # The extracted copy had the membership test inverted; remove() would
    # always raise. The key must be known before it can be deleted.
    if ref in self.keys:
        response = self.request("client_del_key %s" % (ref))
        self.keys.remove(ref)
        if "success" in response:
            return None
        else:
            return response
Delete a key. (ref) Return None or LCDd response on error
6,318
def atlas_peer_dequeue_all( peer_queue=None ): peers = [] with AtlasPeerQueueLocked(peer_queue) as pq: while len(pq) > 0: peers.append( pq.pop(0) ) return peers
Get all queued peers
6,319
def is_underlined(r):
    # Tag names reconstructed from the docx schema: run properties live in
    # <w:rPr> and underlining in <w:u>.
    w_namespace = get_namespace(r, 'w')
    rpr = r.find('%srPr' % w_namespace)
    if rpr is None:
        return False
    underline = rpr.find('%su' % w_namespace)
    return style_is_false(underline)
The function will return True if the r tag passed in is considered underlined.
6,320
def find_field_generators(obj): cls_dict = obj.__class__.__dict__ obj_dict = obj.__dict__ field_gens = {} add_field_generators(field_gens, cls_dict) add_field_generators(field_gens, obj_dict) return field_gens
Return dictionary with the names and instances of all tohu.BaseGenerator occurring in the given object's class & instance namespaces.
6,321
def copy_opts_for_single_ifo(opt, ifo): opt = copy.deepcopy(opt) for arg, val in vars(opt).items(): if isinstance(val, DictWithDefaultReturn): setattr(opt, arg, getattr(opt, arg)[ifo]) return opt
Takes the namespace object (opt) from the multi-detector interface and returns a namespace object for a single ifo that can be used with functions expecting output from the single-detector interface.
6,322
def Q_weir_rectangular_full_Kindsvater_Carter(h1, h2, b):
    # Constants restored to match the docstring formula and its doctest result.
    Q = 2/3.*2**0.5*(0.602 + 0.0832*h1/h2)*(b - 0.001)*g**0.5*(h1 + 0.00125)**1.5
    return Q
r'''Calculates the flow rate across a full-channel rectangular weir from the height of the liquid above the crest of the weir, the liquid depth beneath it, and the width of the channel. Model from [1]_ as reproduced in [2]_.

Flow rate is given by:

.. math::
    Q = \frac{2}{3}\sqrt{2}\left(0.602 + 0.0832\frac{h_1}{h_2}\right)
    \left(b - 0.001\right)\sqrt{g}\left(h_1 + 0.00125\right)^{1.5}

Parameters
----------
h1 : float
    Height of the fluid above the crest of the weir [m]
h2 : float
    Height of the fluid below the crest of the weir [m]
b : float
    Width of the channel section [m]

Returns
-------
Q : float
    Volumetric flow rate across the weir [m^3/s]

Notes
-----
The following limits apply to the use of this equation:

h1 > 0.03 m
b > 0.15 m
h2 > 0.1 m
h1/h2 < 2

Examples
--------
>>> Q_weir_rectangular_full_Kindsvater_Carter(h1=0.3, h2=0.4, b=2)
0.641560300081563

References
----------
.. [1] Kindsvater, Carl E., and Rolland W. Carter. "Discharge Characteristics of Rectangular Thin-Plate Weirs." Journal of the Hydraulics Division 83, no. 6 (December 1957): 1-36.
.. [2] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York, N.Y.: Van Nostrand Reinhold Co., 1984.
6,323
def parse_temperature_response(
        temperature_string: str) -> Mapping[str, Optional[float]]:
    # Literals reconstructed from the docstring example "T:none C:25";
    # the error message wording is assumed.
    err_msg = 'Unexpected argument to parse_temperature_response: {}'.format(
        temperature_string)
    if not temperature_string or \
            not isinstance(temperature_string, str):
        raise ParseError(err_msg)
    parsed_values = temperature_string.strip().split(' ')
    if len(parsed_values) < 2:
        log.error(err_msg)
        raise ParseError(err_msg)
    data = {
        parse_key_from_substring(s): parse_number_from_substring(s)
        for s in parsed_values[:2]
    }
    if 'T' not in data or 'C' not in data:
        raise ParseError(err_msg)
    data = {
        'current': data['C'],
        'target': data['T']
    }
    return data
Example input: "T:none C:25"
6,324
def array_2d_from_array_1d(self, padded_array_1d): padded_array_2d = self.map_to_2d_keep_padded(padded_array_1d) pad_size_0 = self.mask.shape[0] - self.image_shape[0] pad_size_1 = self.mask.shape[1] - self.image_shape[1] return (padded_array_2d[pad_size_0 // 2:self.mask.shape[0] - pad_size_0 // 2, pad_size_1 // 2:self.mask.shape[1] - pad_size_1 // 2])
Map a padded 1D array of values to its original 2D array, trimming all edge values. Parameters ----------- padded_array_1d : ndarray A 1D array of values which were computed using the *PaddedRegularGrid*.
6,325
def activate(self, span, finish_on_close): context = self._get_context() if context is None: return super(TornadoScopeManager, self).activate(span, finish_on_close) scope = _TornadoScope(self, span, finish_on_close) context.active = scope return scope
Make a :class:`~opentracing.Span` instance active. :param span: the :class:`~opentracing.Span` that should become active. :param finish_on_close: whether *span* should automatically be finished when :meth:`Scope.close()` is called. If no :func:`tracer_stack_context()` is detected, thread-local storage will be used to store the :class:`~opentracing.Scope`. Observe that in this case the active :class:`~opentracing.Span` will not be automatically propagated to the child coroutines. :return: a :class:`~opentracing.Scope` instance to control the end of the active period for the :class:`~opentracing.Span`. It is a programming error to neglect to call :meth:`Scope.close()` on the returned instance.
6,326
def daysInMonth(date):
    # The type-name tuple was lost in extraction; Qt date types exposing
    # toPython() are assumed here.
    if type(date).__name__ in ('QDate', 'QDateTime', 'QTime'):
        date = date.toPython()
    month = date.month
    if month == 2 and not date.year % 4:
        return 29
    return DaysInMonth.get(month, -1)
Returns the number of the days in the month for the given date. This will take into account leap years based on the inputted date's year. :param date | <datetime.date> :return <int>
6,327
def parse(cls, conn):
    req = cls(conn)
    req_line = yield from conn.reader.readline()
    logger().debug('%s', req_line)  # log format string lost in extraction
    req._parse_req_line(req_line)
    header_line = yield from conn.reader.readline()
    # Headers end at the blank CRLF line.
    while len(header_line) > 0 and header_line != b'\r\n':
        try:
            req._parse_header(header_line)
        except BadHttpHeaderError as e:
            logger().debug(traceback.format_exc())
        header_line = yield from conn.reader.readline()
    return req
Read a request from the HTTP connection ``conn``. May raise ``BadHttpRequestError``.
6,328
def _auto_scroll(self, *args):
    # The widget key was lost in extraction; a scrolled-window child is assumed.
    adj = self['scrolledwindow'].get_vadjustment()
    adj.set_value(adj.get_upper() - adj.get_page_size())
Scroll to the end of the text view
6,329
def __get_gp_plan(self, gp):
    # Literals reconstructed: the planner is asked for a plan in Turtle;
    # the query-string key and endpoint path are assumptions.
    query = urlencode({'gp': gp})
    response = requests.get('{}/plan?'.format(self.__planner) + query,
                            headers={'Accept': 'text/turtle'})
    graph = Graph()
    try:
        graph.parse(source=StringIO.StringIO(response.text), format='turtle')
    except BadSyntax:
        pass
    return graph
Request the planner a search plan for a given gp and returns the plan as a graph. :param gp: :return:
6,330
def cd(path_to):
    # String literals reconstructed from context; message wording is assumed.
    if path_to == '-':
        if not cd.previous:
            raise PathError('No previous directory to return to')
        return cd(cd.previous)
    if not hasattr(path_to, 'isdir'):
        path_to = makepath(path_to)
    try:
        previous = os.getcwd()
    except OSError as e:
        if 'No such file or directory' in str(e):
            return False
        raise
    if path_to.isdir():
        os.chdir(path_to)
    elif path_to.isfile():
        os.chdir(path_to.parent)
    elif not os.path.exists(path_to):
        return False
    else:
        raise PathError('Cannot cd to %s' % path_to)
    cd.previous = previous
    return True
cd to the given path If the path is a file, then cd to its parent directory Remember current directory before the cd so that we can cd back there with cd('-')
6,331
def circle_touching_line(center, radius, start, end): C, R = center, radius A, B = start, end a = (B.x - A.x)**2 + (B.y - A.y)**2 b = 2 * (B.x - A.x) * (A.x - C.x) \ + 2 * (B.y - A.y) * (A.y - C.y) c = C.x**2 + C.y**2 + A.x**2 + A.y**2 \ - 2 * (C.x * A.x + C.y * A.y) - R**2 discriminant = b**2 - 4 * a * c if discriminant < 0: return False elif discriminant == 0: u = v = -b / float(2 * a) else: u = (-b + math.sqrt(discriminant)) / float(2 * a) v = (-b - math.sqrt(discriminant)) / float(2 * a) if u < 0 and v < 0: return False if u > 1 and v > 1: return False return True
Return true if the given circle intersects the given segment. Note that this checks for intersection with a line segment, and not an actual line. :param center: Center of the circle. :type center: Vector :param radius: Radius of the circle. :type radius: float :param start: The first end of the line segment. :type start: Vector :param end: The second end of the line segment. :type end: Vector
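A small usage sketch of the discriminant test above, assuming `Vector` is any object with `.x`/`.y` attributes (a namedtuple works):

    from collections import namedtuple

    Vector = namedtuple('Vector', 'x y')

    # A unit circle at the origin against a horizontal segment through it:
    circle_touching_line(Vector(0, 0), 1.0, Vector(-2, 0), Vector(2, 0))  # True
    # The same segment lifted well above the circle no longer intersects:
    circle_touching_line(Vector(0, 0), 1.0, Vector(-2, 5), Vector(2, 5))  # False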
6,332
def metadata(request): conf = IdPConfig() conf.load(copy.deepcopy(settings.SAML_IDP_CONFIG)) metadata = entity_descriptor(conf) return HttpResponse(content=text_type(metadata).encode(), content_type="text/xml; charset=utf8")
Returns an XML with the SAML 2.0 metadata for this Idp. The metadata is constructed on-the-fly based on the config dict in the django settings.
6,333
def event(
    title,
    text,
    alert_type=None,
    aggregation_key=None,
    source_type_name=None,
    date_happened=None,
    priority=None,
    tags=None,
    hostname=None,
):
    # [function body lost in extraction]
    ...
Send an event.
6,334
def get_category_metrics(self, category): slug_list = self._category_slugs(category) return self.get_metrics(slug_list)
Get metrics belonging to the given category
6,335
def cancel_subscription(self, sid):
    url = urljoin(self._url_base, self._event_sub_url)
    headers = dict(
        HOST=urlparse(url).netloc,
        SID=sid
    )
    # UPnP event subscriptions are cancelled with the UNSUBSCRIBE verb
    # (the method literal was lost in extraction).
    resp = requests.request('UNSUBSCRIBE', url, headers=headers, auth=self.device.http_auth)
    resp.raise_for_status()
Unsubscribes from a previously configured subscription.
6,336
def run_kmeans(self, X, K): wX = vq.whiten(X) means, dist = vq.kmeans(wX, K, iter=100) labels, dist = vq.vq(wX, means) return means, labels
Runs k-means and returns the labels assigned to the data.
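A quick sketch of calling it on toy data; this mirrors the method's internals directly with `scipy.cluster.vq`, so only that import is assumed:

    import numpy as np
    from scipy.cluster import vq

    # Two well-separated blobs; k-means with K=2 should split them cleanly.
    X = np.vstack([np.random.randn(50, 2), np.random.randn(50, 2) + 10])
    wX = vq.whiten(X)
    means, _ = vq.kmeans(wX, 2, iter=100)
    labels, _ = vq.vq(wX, means)
    print(labels[:5], labels[-5:])  # one blob per cluster label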
6,337
def visit_For(self, node):
    # (A doctest fragment that had leaked into this body was removed.)
    iter_aliases = self.visit(node.iter)
    if all(isinstance(x, ContainerOf) for x in iter_aliases):
        target_aliases = set()
        for iter_alias in iter_aliases:
            target_aliases.add(iter_alias.containee)
    else:
        target_aliases = {node.target}

    self.add(node.target, target_aliases)
    self.aliases[node.target.id] = self.result[node.target]
    # Visit the body twice so aliasing information reaches a fixed point
    # across loop iterations.
    self.generic_visit(node)
    self.generic_visit(node)
For loop creates aliasing between the target and the content of the iterator >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse(""" ... def foo(a): ... for i in a: ... {i}""") >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Set) {i} => ['|i|'] Not very useful, unless we know something about the iterated container >>> module = ast.parse(""" ... def foo(a, b): ... for i in [a, b]: ... {i}""") >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.Set) {i} => ['|a|', '|b|']
6,338
def _fill(self, values): if not self._previous_line: self._previous_line = values return super(StackedLine, self)._fill(values) new_values = values + list(reversed(self._previous_line)) self._previous_line = values return new_values
Add extra values to fill the line
6,339
def processFormData(self, data, dataset_name): cols = self.datasets[dataset_name] reader = self.getCSVReader(data, reader_type=csv.reader) fieldnames = next(reader) for col in cols: varname = col["varname"] if varname not in fieldnames: raise ValueError("Column %s not found in data for dataset %s" % (varname, dataset_name,)) self._processDML(dataset_name, cols, reader)
Take a string of form data as CSV and convert to insert statements, return template and data values
6,340
def parse_headers(self, headers): for name, value in _parse_keyvalue_list(headers): self.headers[name] = value
Parses a semi-colon delimited list of headers. Example: foo=bar;baz=qux
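The helper it leans on is private, so as a sketch only, here is a hypothetical stand-in for `_parse_keyvalue_list` that matches the docstring's example input:

    # Hypothetical stand-in, for illustration only.
    def _parse_keyvalue_list(s):
        for part in s.split(';'):
            name, _, value = part.partition('=')
            yield name.strip(), value.strip()

    list(_parse_keyvalue_list('foo=bar;baz=qux'))
    # [('foo', 'bar'), ('baz', 'qux')]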
6,341
def irfs(self, **kwargs):
    # Key literals reconstructed; 'dataset' and 'irf_ver' are assumptions.
    dsval = kwargs.get('dataset', self.dataset(**kwargs))
    tokens = dsval.split('_')
    irf_name = "%s_%s_%s" % (DATASET_DICTIONARY['%s_%s' % (tokens[0], tokens[1])],
                             EVCLASS_NAME_DICTIONARY[tokens[3]],
                             kwargs.get('irf_ver'))
    return irf_name
Get the name of the IRFs associated with a particular dataset.
6,342
def sequence_names(fasta): sequences = SeqIO.parse(fasta, "fasta") records = [record.id for record in sequences] return records
return a list of the sequence IDs in a FASTA file
6,343
def add(self, type, orig, replace): ret = libxml2mod.xmlACatalogAdd(self._o, type, orig, replace) return ret
Add an entry in the catalog, it may overwrite existing but different entries.
6,344
def action(self, action):
    if not self.direction == DIRECTION_OUT:
        return
    # Build "<port>:<action>" and percent-encode it, per the VAPIX rules
    # quoted in the docstring.
    port_action = quote(
        '{port}:{action}'.format(port=int(self.id) + 1, action=action),
        safe=''
    )
    url = URL + ACTION.format(action=port_action)
    self._request('get', url)
r"""Activate or deactivate an output. Use the <wait> option to activate/deactivate the port for a limited period of time. <Port ID> = Port name. Default: Name from Output.Name <a> = Action character. /=active, \=inactive <wait> = Delay before the next action. Unit: milliseconds Note: The :, / and \ characters must be percent-encoded in the URI. See Percent encoding. Example: To set output 1 to active, use 1:/. In the URI, the action argument becomes action=1%3A%2F
6,345
def send(self, message):
    provider_name = self._default_provider
    if message.provider is not None:
        assert message.provider in self._providers, \
            'Unknown provider: {}'.format(provider_name)  # message text assumed
        provider = self.get_provider(message.provider)
    else:
        if message.routing_values is not None:
            provider_name = self.router(message, *message.routing_values) or self._default_provider
        assert provider_name in self._providers, \
            'Unknown provider: {}'.format(provider_name)  # message text assumed
        provider = self.get_provider(provider_name)
    message.provider = provider.name
    message = provider.send(message)
    self.onSend(message)
    return message
Send a message object :type message: data.OutgoingMessage :param message: The message to send :rtype: data.OutgoingMessage :returns: The sent message with populated fields :raises AssertionError: wrong provider name encountered (returned by the router, or provided to OutgoingMessage) :raises MessageSendError: generic errors :raises AuthError: provider authentication failed :raises LimitsError: sending limits exceeded :raises CreditError: not enough money on the account
6,346
def main():
    # Log message literals were lost in extraction; placeholders are used.
    log = logging.getLogger(Logify.get_name() + '.main')
    log.info('Starting %s', Logify.get_name())
    log.debug('debug message')
    log.info('info message')
    log.warning('warning message')
    log.error('error message')
Sample usage for this python module This main method simply illustrates sample usage for this python module. :return: None
6,347
def login_required(f, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    @wraps(f)
    def wrapper(request, *args, **kwargs):
        if is_authenticated(request.user):
            return f(request, *args, **kwargs)
        # Forward Shopify's authentication query parameters (named in the
        # docstring) to the login URL.
        shopify_params = {
            k: request.GET[k]
            for k in ['shop', 'timestamp', 'signature', 'hmac']
            if k in request.GET
        }
        resolved_login_url = force_str(resolve_url(login_url or settings.LOGIN_URL))
        updated_login_url = add_query_parameters_to_url(resolved_login_url, shopify_params)
        django_login_required_decorator = django_login_required(
            redirect_field_name=redirect_field_name, login_url=updated_login_url)
        return django_login_required_decorator(f)(request, *args, **kwargs)
    return wrapper
Decorator that wraps django.contrib.auth.decorators.login_required, but supports extracting Shopify's authentication query parameters (`shop`, `timestamp`, `signature` and `hmac`) and passing them on to the login URL (instead of just wrapping them up and encoding them in to the `next` parameter). This is useful for ensuring that users are automatically logged on when they first access a page through the Shopify Admin, which passes these parameters with every page request to an embedded app.
6,348
def _is_entity(bpe):
    # The BioPAX class names were lost in extraction; the usual physical
    # entity types are assumed here.
    entity_types = ('PhysicalEntity', 'Protein', 'SmallMolecule', 'Complex',
                    'Rna', 'Dna', 'Gene')
    return any(isinstance(bpe, _bp(t)) or isinstance(bpe, _bpimpl(t))
               for t in entity_types)
Return True if the element is a physical entity.
6,349
def if_url(context, url_name, yes, no): current = context["request"].resolver_match.url_name return yes if url_name == current else no
Example: %li{ class:"{% if_url 'contacts.contact_read' 'active' '' %}" }
6,350
def to_grid_locator(latitude, longitude, precision='square'):
    # Precision names follow the Maidenhead scheme used by the constants
    # below; error-message wording is assumed.
    if precision not in ('square', 'subsquare', 'extsquare'):
        raise ValueError('unsupported precision value %r' % precision)
    if not -90 <= latitude <= 90:
        raise ValueError('invalid latitude value %r' % latitude)
    if not -180 <= longitude <= 180:
        raise ValueError('invalid longitude value %r' % longitude)

    latitude += 90.0
    longitude += 180.0

    locator = []

    field = int(longitude / LONGITUDE_FIELD)
    locator.append(chr(field + 65))
    longitude -= field * LONGITUDE_FIELD

    field = int(latitude / LATITUDE_FIELD)
    locator.append(chr(field + 65))
    latitude -= field * LATITUDE_FIELD

    square = int(longitude / LONGITUDE_SQUARE)
    locator.append(str(square))
    longitude -= square * LONGITUDE_SQUARE

    square = int(latitude / LATITUDE_SQUARE)
    locator.append(str(square))
    latitude -= square * LATITUDE_SQUARE

    if precision in ('subsquare', 'extsquare'):
        subsquare = int(longitude / LONGITUDE_SUBSQUARE)
        locator.append(chr(subsquare + 97))
        longitude -= subsquare * LONGITUDE_SUBSQUARE

        subsquare = int(latitude / LATITUDE_SUBSQUARE)
        locator.append(chr(subsquare + 97))
        latitude -= subsquare * LATITUDE_SUBSQUARE

    if precision == 'extsquare':
        extsquare = int(longitude / LONGITUDE_EXTSQUARE)
        locator.append(str(extsquare))

        extsquare = int(latitude / LATITUDE_EXTSQUARE)
        locator.append(str(extsquare))

    return ''.join(locator)
Calculate Maidenhead locator from latitude and longitude. Args: latitude (float): Position's latitude longitude (float): Position's longitude precision (str): Precision with which generate locator string Returns: str: Maidenhead locator for latitude and longitude Raise: ValueError: Invalid precision identifier ValueError: Invalid latitude or longitude value
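For example, worked by hand from the field/square divisors above (treat the exact output as illustrative):

    # Birmingham, UK: longitude -1.89 falls in field 'I' and latitude 52.479
    # in field 'O', then squares 9 and 2, then subsquares 'b' and 'l'.
    to_grid_locator(52.479, -1.89, 'subsquare')   # -> 'IO92bl'
    to_grid_locator(52.479, -1.89)                # -> 'IO92'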
6,351
def status_schedule(token):
    # Endpoint reconstructed from the docstring ("statusschedule.php");
    # the payload key names and error-field check are assumptions.
    url = 'https://app.hydrawise.com/api/v1/statusschedule.php'
    payload = {
        'api_key': token,
        'hours': 168}
    get_response = requests.get(url, params=payload, timeout=REQUESTS_TIMEOUT)
    if get_response.status_code == 200 and \
            'error_msg' not in get_response.json():
        return get_response.json()
    return None
Returns the json string from the Hydrawise server after calling statusschedule.php. :param token: The users API token. :type token: string :returns: The response from the controller. If there was an error returns None. :rtype: string or None
6,352
def reassemble(cls, fields, document): for field_name in cls._instructions: if field_name in fields: maker = cls._instructions[field_name] with maker.target(document): document[field_name] = maker()
Take a previously assembled document and reassemble the given set of fields for it in place.
6,353
def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0): if cifar_version == "cifar10": url = _CIFAR10_URL train_files = _CIFAR10_TRAIN_FILES test_files = _CIFAR10_TEST_FILES prefix = _CIFAR10_PREFIX image_size = _CIFAR10_IMAGE_SIZE label_key = "labels" elif cifar_version == "cifar100" or cifar_version == "cifar20": url = _CIFAR100_URL train_files = _CIFAR100_TRAIN_FILES test_files = _CIFAR100_TEST_FILES prefix = _CIFAR100_PREFIX image_size = _CIFAR100_IMAGE_SIZE if cifar_version == "cifar100": label_key = "fine_labels" else: label_key = "coarse_labels" _get_cifar(tmp_dir, url) data_files = train_files if training else test_files all_images, all_labels = [], [] for filename in data_files: path = os.path.join(tmp_dir, prefix, filename) with tf.gfile.Open(path, "rb") as f: if six.PY2: data = cPickle.load(f) else: data = cPickle.load(f, encoding="latin1") images = data["data"] num_images = images.shape[0] images = images.reshape((num_images, 3, image_size, image_size)) all_images.extend([ np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images) ]) labels = data[label_key] all_labels.extend([labels[j] for j in range(num_images)]) return image_utils.image_generator( all_images[start_from:start_from + how_many], all_labels[start_from:start_from + how_many])
Image generator for CIFAR-10 and 100. Args: cifar_version: string; one of "cifar10" or "cifar100" tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many images and labels to generate. start_from: from which image to start. Returns: An instance of image_generator that produces CIFAR-10 images and labels.
6,354
def add_caveat(self, cav, key=None, loc=None):
    # [The body of this method was badly mangled in extraction. The surviving
    # error strings indicate checks for a missing private key ('no private
    # key to encrypt third party caveat'), a non-empty condition on a local
    # third-party caveat ('cannot specify caveat condition in local
    # third-party caveat'), and a missing locator ('no locator when adding
    # third party caveat'). Only the recoverable tail is kept below: when
    # encoding for an earlier client or third party, the caveat info itself
    # is used as the id.]
    if encode_for_earlier_version:  # condition lost in extraction
        id = caveat_info
    else:
        id = self._new_caveat_id(self._caveat_id_prefix)
    self._caveat_data[id] = caveat_info
    self._macaroon.add_third_party_caveat(cav.location, root_key, id)
Add a caveat to the macaroon. It encrypts it using the given key pair and by looking up the location using the given locator. As a special case, if the caveat's Location field has the prefix "local " the caveat is added as a client self-discharge caveat using the public key base64-encoded in the rest of the location. In this case, the Condition field must be empty. The resulting third-party caveat will encode the condition "true" encrypted with that public key. @param cav the checkers.Caveat to be added. @param key the public key to encrypt third party caveat. @param loc locator to find information on third parties when adding third party caveats. It is expected to have a third_party_info method that will be called with a location string and should return a ThirdPartyInfo instance holding the requested information.
6,355
def get_config_filename(args):
    experiment_id = check_experiment_id(args)
    if experiment_id is None:
        print_error('Experiment id not found!')  # message text assumed
        exit(1)
    experiment_config = Experiments()
    experiment_dict = experiment_config.get_all_experiments()
    return experiment_dict[experiment_id]['fileName']  # key name assumed
get the file name of config file
6,356
def dad_status_output_dad_last_state(self, **kwargs):
    # Key literals restored following the pynos auto-generated pattern.
    config = ET.Element("config")
    dad_status = ET.Element("dad_status")
    config = dad_status
    output = ET.SubElement(dad_status, "output")
    dad_last_state = ET.SubElement(output, "dad-last-state")
    dad_last_state.text = kwargs.pop('dad_last_state')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
6,357
def flatten(list_of_lists): flat_list = [] for sublist in list_of_lists: if isinstance(sublist, string_types) or isinstance(sublist, int): flat_list.append(sublist) elif sublist is None: continue elif not isinstance(sublist, string_types) and len(sublist) == 1: flat_list.append(sublist[0]) else: flat_list.append(tuple(sublist)) return flat_list
Flatten a list of lists but maintain strings and ints as entries.
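A short illustration of the behaviour described: strings and ints pass through, `None` is dropped, singleton lists are unwrapped, and longer sublists become tuples:

    flatten([1, 'ab', None, [2], [3, 4]])
    # [1, 'ab', 2, (3, 4)]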
6,358
def add_gate_option_group(parser): gate_group = parser.add_argument_group("Options for gating data.") gate_group.add_argument("--gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data before " "filtering.") gate_group.add_argument("--gate-overwhitened", action="store_true", help="Overwhiten data first, then apply the " "gates specified in --gate. Overwhitening " "allows for sharper tapers to be used, " "since lines are not blurred.") gate_group.add_argument("--psd-gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data used " "for computing the PSD. Gates are applied " "prior to FFT-ing the data for PSD " "estimation.") return gate_group
Adds the options needed to apply gates to data. Parameters ---------- parser : object ArgumentParser instance.
6,359
def validateRequest(self, uri, postVars, expectedSignature): s = uri for k, v in sorted(postVars.items()): s += k + v return (base64.encodestring(hmac.new(self.auth_token, s, sha1).digest()).\ strip() == expectedSignature)
validate a request from plivo uri: the full URI that Plivo requested on your server postVars: post vars that Plivo sent with the request expectedSignature: signature in HTTP X-Plivo-Signature header returns true if the request passes validation, false if not
6,360
def to_tf_matrix(expression_matrix, gene_names, tf_names): tuples = [(index, gene) for index, gene in enumerate(gene_names) if gene in tf_names] tf_indices = [t[0] for t in tuples] tf_matrix_names = [t[1] for t in tuples] return expression_matrix[:, tf_indices], tf_matrix_names
:param expression_matrix: numpy matrix. Rows are observations and columns are genes. :param gene_names: a list of gene names. Each entry corresponds to the expression_matrix column with same index. :param tf_names: a list of transcription factor names. Should be a subset of gene_names. :return: tuple of: 0: A numpy matrix representing the predictor matrix for the regressions. 1: The gene names corresponding to the columns in the predictor matrix.
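A toy call of the above; the matrix and gene lists here are made-up, and only numpy is assumed:

    import numpy as np

    expression = np.arange(12).reshape(3, 4)        # 3 observations x 4 genes
    genes = ['g1', 'tf_a', 'g2', 'tf_b']
    tf_matrix, tf_names = to_tf_matrix(expression, genes, ['tf_a', 'tf_b'])
    print(tf_names)     # ['tf_a', 'tf_b']
    print(tf_matrix)    # columns 1 and 3 of `expression`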
6,361
def _get_co_name(self, context): try: co_name = context.state[self.name][self.KEY_CO_NAME] logger.debug("Found CO {} from state".format(co_name)) except KeyError: co_name = self._get_co_name_from_path(context) logger.debug("Found CO {} from request path".format(co_name)) return co_name
Obtain the CO name previously saved in the request state, or if not set use the request path obtained from the current context to determine the target CO. :type context: The current context :rtype: string :param context: The current context :return: CO name
6,362
def make_parser_with_config_adder(parser, config):
    def internal(arg, **kwargs):
        # Literals reconstructed from the docstring: boolean flags get an
        # auto-generated --no-/-- opposite; help text wording is assumed.
        invert = {
            'store_true': 'store_false',
            'store_false': 'store_true',
        }
        if arg.startswith('--no-'):
            key = arg[5:]
        else:
            key = arg[2:]
        if 'default' in kwargs:
            if key in config:
                kwargs['default'] = config[key]
                del config[key]
        action = kwargs.get('action')
        if action in invert:
            exclusive_grp = parser.add_mutually_exclusive_group()
            exclusive_grp.add_argument(arg, **kwargs)
            kwargs['action'] = invert[action]
            kwargs['help'] = 'Inverse of %s' % arg  # help text assumed
            if arg.startswith('--no-'):
                arg = '--%s' % arg[5:]
            else:
                arg = '--no-%s' % arg[2:]
            exclusive_grp.add_argument(arg, **kwargs)
        else:
            parser.add_argument(arg, **kwargs)
    return internal
factory function for a smarter parser: return an utility function that pull default from the config as well. Pull the default for parser not only from the ``default`` kwarg, but also if an identical value is find in ``config`` where leading ``--`` or ``--no`` is removed. If the option is a boolean flag, automatically register an opposite, exclusive option by prepending or removing the `--no-`. This is useful to overwrite config in ``.travis.yml`` Mutate the config object and remove know keys in order to detect unused options afterwoard.
6,363
def draw(self, scale=0.7, filename=None, style=None, output=, interactive=False, line_length=None, plot_barriers=True, reverse_bits=False, justify=None): from qiskit.tools import visualization return visualization.circuit_drawer(self, scale=scale, filename=filename, style=style, output=output, interactive=interactive, line_length=line_length, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify)
Draw the quantum circuit Using the output parameter you can specify the format. The choices are: 0. text: ASCII art string 1. latex: high-quality images, but heavy external software dependencies 2. matplotlib: purely in Python with no external dependencies Defaults to an overcomplete basis, in order to not alter gates. Args: scale (float): scale of image to draw (shrink if < 1) filename (str): file path to save image to style (dict or str): dictionary of style or file name of style file. You can refer to the :ref:`Style Dict Doc <style-dict-doc>` for more information on the contents. output (str): Select the output method to use for drawing the circuit. Valid choices are `text`, `latex`, `latex_source`, `mpl`. interactive (bool): when set true show the circuit in a new window (for `mpl` this depends on the matplotlib backend being used supporting this). Note when used with either the `text` or the `latex_source` output type this has no effect and will be silently ignored. line_length (int): sets the length of the lines generated by `text` reverse_bits (bool): When set to True reverse the bit order inside registers for the output visualization. plot_barriers (bool): Enable/disable drawing barriers in the output circuit. Defaults to True. justify (string): Options are `left`, `right` or `none`, if anything else is supplied it defaults to left justified. It refers to where gates should be placed in the output circuit if there is an option. `none` results in each gate being placed in its own column. Currently only supported by text drawer. Returns: PIL.Image or matplotlib.figure or str or TextDrawing: * PIL.Image: (output `latex`) an in-memory representation of the image of the circuit diagram. * matplotlib.figure: (output `mpl`) a matplotlib figure object for the circuit diagram. * str: (output `latex_source`). The LaTeX source code. * TextDrawing: (output `text`). A drawing that can be printed as ascii art Raises: VisualizationError: when an invalid output method is selected
6,364
def create_can_publish_and_can_republish_permissions(sender, **kwargs):
    # Codenames restored from the docstring; the display names are assumed.
    for model in sender.get_models():
        if not issubclass(model, PublishingModel):
            continue
        content_type = ContentType.objects.get_for_model(model)
        permission, created = Permission.objects.get_or_create(
            content_type=content_type,
            codename='can_publish',
            defaults=dict(name='Can publish %s' % model.__name__))
        permission, created = Permission.objects.get_or_create(
            content_type=content_type,
            codename='can_republish',
            defaults=dict(name='Can republish %s' % model.__name__))
Add `can_publish` and `can_republish` permissions for each publishable model in the system.
6,365
def get(self, request, bot_id, format=None): return super(TelegramBotList, self).get(request, bot_id, format)
Get list of Telegram bots --- serializer: TelegramBotSerializer responseMessages: - code: 401 message: Not authenticated
6,366
def emit_message(self, message): try: nickname_color = self.nicknames[self.nickname] except KeyError: return message = message[:settings.MAX_MESSAGE_LENGTH] if message.startswith("/"): self.connection.send_raw(message.lstrip("/")) return self.message_channel(message) self.namespace.emit("message", self.nickname, message, nickname_color)
Send a message to the channel. We also emit the message back to the sender's WebSocket.
6,367
def _handle_ansi_color_codes(self, s):
    # Literals reconstructed: codes are ';'-separated and each opens or
    # closes a span; the css class naming is an assumption.
    parts = HtmlReporter._ANSI_COLOR_CODE_RE.split(s)
    ret = []
    span_depth = 0
    for i in range(0, len(parts), 2):
        ret.append(parts[i])
        if i + 1 < len(parts):
            for code in parts[i + 1].split(';'):
                if code == 0:
                    while span_depth > 0:
                        ret.append('</span>')
                        span_depth -= 1
                else:
                    ret.append('<span class="ansi-{}">'.format(code))  # class name assumed
                    span_depth += 1
    while span_depth > 0:
        ret.append('</span>')
        span_depth -= 1
    return ''.join(ret)
Replace ansi escape sequences with spans of appropriately named css classes.
6,368
def sqrt_rc_imp(Ns,alpha,M=6): n = np.arange(-M*Ns,M*Ns+1) b = np.zeros(len(n)) Ns *= 1.0 a = alpha for i in range(len(n)): if abs(1 - 16*a**2*(n[i]/Ns)**2) <= np.finfo(np.float).eps/2: b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a))) else: b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/Ns)**2)) b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/Ns) + np.sinc((1-a)*n[i]/Ns)*(1-a)*np.pi/(4.*a)) return b
A truncated square root raised cosine pulse used in digital communications. The pulse shaping factor :math:`0 < \\alpha < 1` is required as well as the truncation factor M which sets the pulse duration to be :math:`2*M*T_{symbol}`. Parameters ---------- Ns : number of samples per symbol alpha : excess bandwidth factor on (0, 1), e.g., 0.35 M : equals RC one-sided symbol truncation factor Returns ------- b : ndarray containing the pulse shape Notes ----- The pulse shape b is typically used as the FIR filter coefficients when forming a pulse shaped digital communications waveform. When square root raised cosine (SRC) pulse is used to generate Tx signals and at the receiver used as a matched filter (receiver FIR filter), the received signal is now raised cosine shaped, thus having zero intersymbol interference and the optimum removal of additive white noise if present at the receiver input. Examples -------- Ten samples per symbol and :math:`\\alpha = 0.35`. >>> import matplotlib.pyplot as plt >>> from numpy import arange >>> from sk_dsp_comm.digitalcom import sqrt_rc_imp >>> b = sqrt_rc_imp(10,0.35) >>> n = arange(-10*6,10*6+1) >>> plt.stem(n,b) >>> plt.show()
6,369
def validate_filters_or_records(filters_or_records): if not filters_or_records: raise ValueError() if not isinstance(filters_or_records[0], (Record, tuple)): raise ValueError() _type = type(filters_or_records[0]) for item in filters_or_records: if not isinstance(item, _type): raise ValueError("Expected filter tuple or Record, received {0}".format(item)) return _type
Validation for filters_or_records variable from bulk_modify and bulk_delete
6,370
async def connect(self):
    if isinstance(self.connection, dict):
        kwargs = self.connection.copy()
        # Key names assumed; default redis host/port restored from context.
        address = (
            kwargs.pop('host', 'localhost'),
            kwargs.pop('port', 6379)
        )
        redis_kwargs = kwargs
    elif isinstance(self.connection, aioredis.Redis):
        self._pool = self.connection
    else:
        address = self.connection
        redis_kwargs = {}
    if self._pool is None:
        async with self._lock:
            if self._pool is None:
                self.log.debug('Creating redis pool for %s', repr(self))  # message assumed
                self._pool = await self._create_redis_pool(
                    address, **redis_kwargs, minsize=1, maxsize=100)
    return await self._pool
Get an connection for the self instance
6,371
def _load32(ins): output = _32bit_oper(ins.quad[2]) output.append() output.append() return output
Load a 32 bit value from a memory address If 2nd arg. start with '*', it is always treated as an indirect value.
6,372
def strip_trailing_slashes(self, path): m = re.match(r"(.*)/+$", path) if (m is None): return(path) return(m.group(1))
Return input path minus any trailing slashes.
6,373
def _split_iso9660_filename(fullname):
    # ISO 9660 names have the form name.extension;version.
    namesplit = fullname.split(b';')
    version = b''
    if len(namesplit) > 1:
        version = namesplit.pop()

    rest = b';'.join(namesplit)

    dotsplit = rest.split(b'.')
    if len(dotsplit) == 1:
        name = dotsplit[0]
        extension = b''
    else:
        name = b'.'.join(dotsplit[:-1])
        extension = dotsplit[-1]

    return (name, extension, version)
A function to split an ISO 9660 filename into its constituent parts. This is the name, the extension, and the version number. Parameters: fullname - The name to split. Returns: A tuple containing the name, extension, and version.
6,374
def lrucache(func, size): if size == 0: return func elif size < 0: raise ValueError("size argument must be a positive integer") if not is_arity(1, func): raise ValueError("The function must be unary (take a single argument)") cache = OrderedDict() def wrapper(x): if not(type(x) is np.ndarray): raise ValueError("Input must be an ndarray") if x.size <= 1e4: key = hash(x.tostring()) else: key = hash(repr(x)) if key not in cache: if len(cache) >= size: cache.popitem(last=False) cache[key] = func(x) return cache[key] return wrapper
A simple implementation of a least recently used (LRU) cache. Memoizes the recent calls of a computationally intensive function. Parameters ---------- func : function Must be unary (takes a single argument) size : int The size of the cache (number of previous calls to store)
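Usage sketch: wrap an expensive unary function of an ndarray, and repeated calls with the same array hit the cache. `slow_norm` is a made-up stand-in for the expensive computation:

    import numpy as np

    def slow_norm(x):
        return np.linalg.norm(x)   # stands in for an expensive computation

    cached_norm = lrucache(slow_norm, size=10)
    x = np.random.randn(100)
    cached_norm(x)   # computed and stored
    cached_norm(x)   # returned from the cache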
6,375
def console_output(msg, logging_msg=None):
    assert isinstance(msg, bytes)
    assert isinstance(logging_msg, bytes) or logging_msg is None
    from polysh import remote_dispatcher
    remote_dispatcher.log(logging_msg or msg)
    if remote_dispatcher.options.interactive:
        from polysh.stdin import the_stdin_thread
        the_stdin_thread.no_raw_input()
        global last_status_length
        if last_status_length:
            # Blank out the previous status line before printing
            # (the exact literals were lost in extraction).
            safe_write('\r{}\r'.format(last_status_length * ' ').encode())
            last_status_length = 0
    safe_write(msg)
Use instead of print, to clear the status information before printing
6,376
def read_file(self, location): try: return yaml.load(open(location)) except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error: raise self.BadFileErrorKls("Failed to read yaml", location=location, error_type=error.__class__.__name__, error="{0}{1}".format(error.problem, error.problem_mark))
Read in a yaml file and return as a python object
6,377
def timeseries(self):
    # Column names 'p'/'q' follow the docstring; the join mode and the
    # curtailment frame name are assumptions.
    if self._timeseries is None:
        if isinstance(self.grid.network.timeseries.generation_fluctuating.
                      columns, pd.MultiIndex):
            if self.weather_cell_id:
                try:
                    timeseries = self.grid.network.timeseries.\
                        generation_fluctuating[
                            self.type, self.weather_cell_id].to_frame('p')
                except KeyError:
                    logger.exception("No time series for type {} and "
                                     "weather cell ID {} given.".format(
                                         self.type, self.weather_cell_id))
                    raise
            else:
                logger.exception("No weather cell ID provided for "
                                 "fluctuating generator {}.".format(
                                     repr(self)))
                raise KeyError
        else:
            try:
                timeseries = self.grid.network.timeseries.\
                    generation_fluctuating[self.type].to_frame('p')
            except KeyError:
                logger.exception("No time series for type {} "
                                 "given.".format(self.type))
                raise
        timeseries = timeseries * self.nominal_capacity
        if self.curtailment is not None:
            timeseries = timeseries.join(
                self.curtailment.to_frame('curtailment'), how='left')
            timeseries.p = timeseries.p - timeseries.curtailment.fillna(0)
        if self.timeseries_reactive is not None:
            timeseries['q'] = self.timeseries_reactive
        else:
            timeseries['q'] = timeseries['p'] * self.q_sign * tan(acos(
                self.power_factor))
        return timeseries
    else:
        return self._timeseries.loc[
            self.grid.network.timeseries.timeindex, :]
Feed-in time series of generator It returns the actual time series used in power flow analysis. If :attr:`_timeseries` is not :obj:`None`, it is returned. Otherwise, :meth:`timeseries` looks for generation and curtailment time series of the according type of technology (and weather cell) in :class:`~.grid.network.TimeSeries`. Returns ------- :pandas:`pandas.DataFrame<dataframe>` DataFrame containing active power in kW in column 'p' and reactive power in kVA in column 'q'.
6,378
def _map_query_path_to_location_info(query_metadata_table):
    query_path_to_location_info = {}
    for location, location_info in query_metadata_table.registered_locations:
        if not isinstance(location, Location):
            continue

        if location.query_path in query_path_to_location_info:
            equivalent_location_info = query_path_to_location_info[location.query_path]
            if not _location_infos_equal(location_info, equivalent_location_info):
                # The assertion message was lost in extraction; wording assumed.
                raise AssertionError(
                    u'Found locations at query path {} with differing '
                    u'LocationInfos: {} vs {}'.format(
                        location.query_path, location_info,
                        equivalent_location_info))

        query_path_to_location_info[location.query_path] = location_info
    return query_path_to_location_info
Create a map from each query path to a LocationInfo at that path. Args: query_metadata_table: QueryMetadataTable, object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). Returns: Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.
6,379
def check_vip_ip(self, ip, environment_vip):
    # The URI template was lost in extraction; the networkapi ipv6 route
    # is assumed.
    uri = 'api/ipv6/ip/%s/environment-vip/%s/' % (ip, environment_vip)
    return super(ApiNetworkIPv6, self).get(uri)
Check available ipv6 in environment vip
6,380
def warning(self, msg, indent=0, **kwargs): return self.logger.warning(self._indent(msg, indent), **kwargs)
invoke ``self.logger.warning``
6,381
def triangle_normal(tri): vec1 = vector_generate(tri.vertices[0].data, tri.vertices[1].data) vec2 = vector_generate(tri.vertices[1].data, tri.vertices[2].data) return vector_cross(vec1, vec2)
Computes the (approximate) normal vector of the input triangle. :param tri: triangle object :type tri: elements.Triangle :return: normal vector of the triangle :rtype: tuple
6,382
def active(self): qs = self.get_queryset() return qs.filter( models.Q( models.Q(start_date__isnull=True) | models.Q(start_date__lte=now().date()) ) & models.Q( models.Q(end_date__isnull=True) | models.Q(end_date__gte=now().date()) ) ).distinct()
Returns all outlets that are currently active and have sales.
6,383
def _replace_type_to_regex(cls, match):
    groupdict = match.groupdict()
    _type = groupdict.get('type')
    # The default pattern for unknown types was lost in extraction; \w+ assumed.
    type_regex = cls.TYPE_REGEX_MAP.get(_type, r'\w+')
    name = groupdict.get('name')
    return r'(?P<{name}>{type_regex})'.format(
        name=name,
        type_regex=type_regex
    )
/<int:id> -> r'(?P<id>\d+)'
6,384
def getSpec(cls):
    # [The spec dict was mangled in extraction: the parameter descriptions
    # were fused into one run of text and their dict structure lost. Only
    # the recoverable skeleton is kept. The garbled text mentions, among
    # others, the parameters learningMode, inferenceMode, k,
    # maxCategoryCount, distanceNorm, distanceMethod, distThreshold,
    # doBinarization, useSparseMemory, sparseThreshold, doSphering,
    # SVDSampleCount, SVDDimCount, fractionOfMax, useAuxiliary,
    # justUseAuxiliary, verbosity, replaceDuplicates, cellsPerCol and
    # maxStoredPatterns.]
    ns = dict(
        description=KNNClassifierRegion.__doc__,
        singleNodeOnly=True,
        inputs=dict(
            categoryIn=dict(
                description='',  # text lost in extraction
                dataType='Real32',
                count=0,
                required=True,
                regionLevel=True,
                isDefaultInput=False,
                requireSplitterMap=False),
            bottomUpIn=dict(...),  # remainder of the spec lost in extraction
        ),
        commands=dict()
    )
    return ns
Overrides :meth:`nupic.bindings.regions.PyRegion.PyRegion.getSpec`.
6,385
def generate_query_string(self, otp, nonce, timestamp=False, sl=None,
                          timeout=None):
    data = [('id', self.client_id),
            ('otp', otp),
            ('nonce', nonce)]

    if timestamp:
        data.append(('timestamp', '1'))

    if sl is not None:
        if sl not in range(0, 101) and sl not in ['fast', 'secure']:
            raise Exception('sl parameter value must be between 0 and 100 '
                            'or string "fast" or "secure"')
        data.append(('sl', sl))

    if timeout:
        data.append(('timeout', timeout))

    query_string = urlencode(data)

    if self.key:
        hmac_signature = self.generate_message_signature(query_string)
        query_string += '&h=%s' % (hmac_signature.replace('+', '%2B'))

    return query_string
Returns a query string which is sent to the validation servers.
6,386
def controller(self):
    if hasattr(self, 'controllers'):
        if len(self.controllers) > 1:
            raise TypeError("Only one controller per account.")
        return self.controllers[0]
    raise AttributeError("There is no controller assigned.")
Show current linked controllers.
6,387
def get_all_webhooks(self, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_all_webhooks_with_http_info(**kwargs)
    else:
        (data) = self.get_all_webhooks_with_http_info(**kwargs)
        return data
Get all webhooks for a customer # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_webhooks(async_req=True) >>> result = thread.get() :param async_req bool :param int offset: :param int limit: :return: ResponseContainerPagedNotificant If the method is called asynchronously, returns the request thread.
6,388
async def write(self, data): if type(data) != bytes: data = self._encode_body(data) self.protocol.push_data(b"%x\r\n%b\r\n" % (len(data), data)) await self.protocol.drain()
Writes a chunk of data to the streaming response. :param data: bytes-ish data to be written.
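The b"%x\r\n%b\r\n" framing is standard HTTP/1.1 chunked transfer encoding: each chunk is prefixed with its length in hexadecimal and terminated by CRLF, and a zero-length chunk (b"0\r\n\r\n") ends the stream. A self-contained sketch of just the framing step:

def frame_chunk(data: bytes) -> bytes:
    # <length in hex>\r\n<payload>\r\n
    return b"%x\r\n%b\r\n" % (len(data), data)

assert frame_chunk(b"hello") == b"5\r\nhello\r\n"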
6,389
def _item_check(self, dim_vals, data):
    if not self._check_items:
        return
    elif self.data_type is not None and not isinstance(data, self.data_type):
        if isinstance(self.data_type, tuple):
            data_type = tuple(dt.__name__ for dt in self.data_type)
        else:
            data_type = self.data_type.__name__
        raise TypeError('{slf} does not accept {data} type, data elements have '
                        'to be a {restr}.'.format(slf=type(self).__name__,
                                                  data=type(data).__name__,
                                                  restr=data_type))
    elif not len(dim_vals) == self.ndims:
        # Reconstructed message; the original literal was lost, but the
        # arguments show it reported both dimensionalities.
        raise KeyError('Key dimensionality (%d) does not match the number of '
                       'key dimensions (%d)' % (len(dim_vals), self.ndims))
Applies optional checks to individual data elements before they are inserted, ensuring that they are of a certain type. Subclasses may implement further element restrictions.
6,390
def get_mapped_filenames(self, memoryMap = None): hProcess = self.get_handle( win32.PROCESS_VM_READ | win32.PROCESS_QUERY_INFORMATION ) if not memoryMap: memoryMap = self.get_memory_map() mappedFilenames = dict() for mbi in memoryMap: if mbi.Type not in (win32.MEM_IMAGE, win32.MEM_MAPPED): continue baseAddress = mbi.BaseAddress fileName = "" try: fileName = win32.GetMappedFileName(hProcess, baseAddress) fileName = PathOperations.native_to_win32_pathname(fileName) except WindowsError: pass mappedFilenames[baseAddress] = fileName return mappedFilenames
Retrieves the filenames for memory mapped files in the debugee. @type memoryMap: list( L{win32.MemoryBasicInformation} ) @param memoryMap: (Optional) Memory map returned by L{get_memory_map}. If not given, the current memory map is used. @rtype: dict( int S{->} str ) @return: Dictionary mapping memory addresses to file names. Native filenames are converted to Win32 filenames when possible.
6,391
def implementation(self,
                   commands_module: arg(short_option='-m') = DEFAULT_COMMANDS_MODULE,
                   config_file: arg(short_option='-f') = None,
                   globals_: arg(
                       container=dict,
                       type=json_value,
                       help='Global variables & default args for all commands'
                   ) = None,
                   env: arg(help='Environment name') = None,
                   version: arg(help='Version string') = None,
                   echo: arg(
                       type=bool,
                       help='Echo commands before running them',
                       inverse_help='Do not echo commands before running them'
                   ) = None,
                   environ: arg(
                       container=dict,
                       help='Additional environment variables'
                   ) = None,
                   info: arg(help='Show info and exit') = False,
                   list_commands: arg(help='List commands and exit') = False,
                   debug: arg(
                       type=bool,
                       help='Print debugging info'
                   ) = None,
                   *,
                   all_argv=(),
                   run_argv=(),
                   command_argv=(),
                   cli_args=()):
    # NOTE: the string literals in this function were lost in extraction;
    # the help texts, dict keys and messages below are reconstructed from
    # context and may not match the original wording exactly.
    collection = Collection.load_from_module(commands_module)
    config_file = self.find_config_file(config_file)
    cli_globals = globals_ or {}

    if env:
        cli_globals['env'] = env
    if version:
        cli_globals['version'] = version
    if echo is not None:
        cli_globals['echo'] = echo
    if debug is not None:
        cli_globals['debug'] = debug

    if config_file:
        args_from_file = self.read_config_file(config_file, collection)
        args = merge_dicts(args_from_file, {'environ': environ or {}})
        config_file_globals = args['globals']
        env = cli_globals.get('env') or config_file_globals.get('env')
        if env:
            envs = args['envs']
            try:
                env_globals = envs[env]
            except KeyError:
                raise RunnerError('Unknown env: {env}'.format_map(locals()))
            globals_ = merge_dicts(config_file_globals, env_globals, cli_globals)
            globals_['envs'] = envs
        else:
            globals_ = merge_dicts(config_file_globals, cli_globals)

        default_args = {name: {} for name in collection}
        default_args = merge_dicts(default_args, args.get('args') or {})

        for command_name, command_default_args in default_args.items():
            command = collection[command_name]
            for name in tuple(command_default_args):
                param = command.find_parameter(name)
                if param is None:
                    raise RunnerError(
                        'Unknown arg for command {command_name}: {name}'
                        .format_map(locals()))
                if param is not None and name != param.name:
                    command_default_args[param.name] = command_default_args.pop(name)
            for name, value in command_default_args.items():
                command_arg = command.find_arg(name)
                if command_arg.container and isinstance(value, list):
                    command_default_args[name] = command_arg.container(value)

        default_args = {name: args for name, args in default_args.items() if args}
        environ = args['environ']
    else:
        globals_ = cli_globals
        default_args = {}
        environ = environ or {}

    debug = globals_.get('debug', False)
    show_info = info or list_commands or not command_argv or debug
    print_and_exit = info or list_commands

    globals_, default_args, environ = self.interpolate(globals_, default_args, environ)

    if show_info:
        print('RunCommands', __version__)

    if debug:
        print()
        printer.debug('Commands module:', commands_module)
        printer.debug('Config file:', config_file)
        printer.debug('All args:', all_argv)
        printer.debug('Run args:', run_argv)
        printer.debug('Command args:', command_argv)
        items = (
            ('Globals:', globals_),
            ('Default args:', default_args),
            ('Environment variables:', environ),
        )
        for label, data in items:
            if data:
                printer.debug(label)
                for k in sorted(data):
                    v = data[k]
                    printer.debug('    {k} = {v}'.format_map(locals()))

    if environ:
        os.environ.update(environ)

    collection.set_attrs(debug=debug)
    collection.set_default_args(default_args)
    runner = CommandRunner(collection, debug)

    if print_and_exit:
        if list_commands:
            runner.print_usage()
    elif not command_argv:
        printer.warning('No command(s) specified')
        runner.print_usage()
    else:
        runner.run(command_argv)
Run one or more commands in succession. For example, assume the commands ``local`` and ``remote`` have been defined; the following will run ``ls`` first on the local host and then on the remote host:: runcommands local ls remote <host> ls When a command name is encountered in ``argv``, it will be considered the starting point of the next command *unless* the previous item in ``argv`` was an option like ``--xyz`` that expects a value (i.e., it's not a flag). To avoid ambiguity when an option value matches a command name, the value can be prepended with a colon to force it to be considered a value and not a command name.
6,392
def liste_campagnes(self, campagne=None):
    condition = ""
    if campagne:
        condition = "WHERE NOM_COURT_CM='%s' " % campagne
    _sql = """SELECT
    NOM_COURT_CM AS CAMPAGNE,
    IDENTIFIANT AS STATION,
    LIBELLE AS LIBELLE_CM,
    DATEDEB AS DEBUT,
    DATEFIN AS FIN
    FROM CAMPMES
    INNER JOIN CAMPMES_STATION USING (NOM_COURT_CM)
    INNER JOIN STATION USING (NOM_COURT_SIT)
    %s ORDER BY DATEDEB DESC""" % condition
    return psql.read_sql(_sql, self.conn)
List the measurement campaigns and their associated stations Parameters: campagne: if given, only list the stations for that campaign
6,393
def ecef2ned(x: float, y: float, z: float, lat0: float, lon0: float, h0: float, ell: Ellipsoid = None, deg: bool = True) -> Tuple[float, float, float]: e, n, u = ecef2enu(x, y, z, lat0, lon0, h0, ell, deg=deg) return n, e, -u
Convert ECEF x,y,z to North, East, Down Parameters ---------- x : float or numpy.ndarray of float ECEF x coordinate (meters) y : float or numpy.ndarray of float ECEF y coordinate (meters) z : float or numpy.ndarray of float ECEF z coordinate (meters) lat0 : float Observer geodetic latitude lon0 : float Observer geodetic longitude h0 : float observer altitude above geodetic ellipsoid (meters) ell : Ellipsoid, optional reference ellipsoid deg : bool, optional degrees input/output (False: radians in/out) Results ------- n : float or numpy.ndarray of float North NED coordinate (meters) e : float or numpy.ndarray of float East NED coordinate (meters) d : float or numpy.ndarray of float Down NED coordinate (meters)
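A hedged usage sketch, assuming the function is exposed at the package top level as in pymap3d (the coordinates are made up):

from pymap3d import geodetic2ecef, ecef2ned

lat0, lon0, h0 = 42.0, -82.0, 200.0           # observer position
x, y, z = geodetic2ecef(42.1, -81.9, 1000.0)  # target position in ECEF
n, e, d = ecef2ned(x, y, z, lat0, lon0, h0)
print(n, e, d)  # target is north, east and above the observer, so d < 0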
6,394
def write_json(path, params):
    logger.debug("write %s to %s", params, path)
    if path.startswith("s3://"):
        bucket = get_boto3_bucket(path.split("/")[2])
        key = "/".join(path.split("/")[3:])
        logger.debug("upload %s", key)
        bucket.put_object(
            Key=key,
            Body=json.dumps(params, sort_keys=True, indent=4)
        )
    else:
        makedirs(os.path.dirname(path))
        with open(path, 'w') as dst:
            json.dump(params, dst, sort_keys=True, indent=4)
Write local or remote.
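Illustrative calls (the paths and bucket name are made up; the S3 branch assumes boto3 credentials are configured):

write_json("output/params.json", {"zoom": 5})            # local file
write_json("s3://some-bucket/params.json", {"zoom": 5})  # uploaded via boto3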
6,395
def username(self, value=None): if value is not None: return URL._mutate(self, username=value) return unicode_unquote(self._tuple.username)
Return or set the username :param string value: the new username to use :returns: string or new :class:`URL` instance
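A sketch of this getter/setter-in-one pattern, assuming a purl-style immutable URL class (the example URL is made up):

u = URL('https://alice@example.com/')
print(u.username())       # 'alice'
u2 = u.username('bob')    # returns a *new* URL; u itself is unchanged
print(u2.username(), u.username())  # 'bob' 'alice'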
6,396
def is_playing_line_in(self):
    response = self.avTransport.GetPositionInfo([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    track_uri = response['TrackURI']
    return re.match(r'^x-rincon-stream:', track_uri) is not None
bool: Is the speaker playing line-in?
6,397
def UpdateCronJob(self,
                  cronjob_id,
                  last_run_status=unchanged,
                  last_run_time=unchanged,
                  current_run_id=unchanged,
                  state=unchanged,
                  forced_run_requested=unchanged):
    # The body is absent in the source; this reads like an abstract
    # database-interface method, so a NotImplementedError stub is assumed
    # here and concrete database implementations are expected to override it.
    raise NotImplementedError()
Updates run information for an existing cron job. Args: cronjob_id: The id of the cron job to update. last_run_status: A CronJobRunStatus object. last_run_time: The last time a run was started for this cron job. current_run_id: The id of the currently active run. state: The state dict for stateful cron jobs. forced_run_requested: A boolean indicating if a forced run is pending for this job. Raises: UnknownCronJobError: A cron job with the given id does not exist.
6,398
def get_candidate_election(self, election): return CandidateElection.objects.get(candidate=self, election=election)
Get a CandidateElection.
6,399
def enforce_dependencies(cls, functions, kind):
    # NOTE: the string literals in this function were lost in extraction;
    # the param keys and log messages below are reconstructed from context.
    for dependency, dependent_dict in six.iteritems(cls.dependency_dict[kind]):
        for (mod_name, func_name), (frame, params) in six.iteritems(dependent_dict):
            if 'retcode' in params or 'nonzero_retcode' in params:
                try:
                    retcode = cls.run_command(dependency, mod_name, func_name)
                except OSError as exc:
                    if exc.errno == errno.ENOENT:
                        log.trace(
                            'Failed to run command %s, %s not found',
                            dependency, exc.filename
                        )
                    else:
                        log.trace(
                            'Failed to run command \'%s\': %s', dependency, exc
                        )
                    retcode = -1

                if 'retcode' in params:
                    if params['retcode'] == retcode:
                        continue
                elif 'nonzero_retcode' in params:
                    if params['nonzero_retcode']:
                        if retcode != 0:
                            continue
                    else:
                        if retcode == 0:
                            continue
            # The dependency is unconditionally fulfilled
            elif dependency is True:
                log.trace(
                    'Dependency for %s.%s is unconditionally fulfilled',
                    mod_name, func_name
                )
                continue
            # Check if the dependency is already loaded
            elif dependency in frame.f_globals \
                    or dependency in frame.f_locals:
                log.trace(
                    'Dependency (%s) already loaded inside %s', dependency, mod_name
                )
                continue
            log.trace(
                'Unloading %s.%s because dependency (%s) is not met',
                mod_name, func_name, dependency
            )
            if frame:
                try:
                    func_name = frame.f_globals['__func_alias__'][func_name]
                except (AttributeError, KeyError):
                    pass
                mod_key = '{0}.{1}'.format(mod_name, func_name)
                # Skip modules that are not loaded
                if mod_key not in functions:
                    continue
                # Remove the function whose dependency is unmet (reconstructed;
                # the tail of this function was truncated in the source)
                del functions[mod_key]
This is a class global method to enforce the dependencies that you currently know about. It will modify the "functions" dict and remove/replace modules that are missing dependencies.
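A minimal, self-contained sketch of the general pattern this implements — a decorator records which functions need a dependency, and an enforcement pass unloads the ones whose dependency is missing. The names below are illustrative, not Salt's actual implementation:

import importlib.util

REGISTRY = {}  # dependency name -> set of function keys

def depends(dependency):
    def wrapper(func):
        REGISTRY.setdefault(dependency, set()).add(func.__qualname__)
        return func
    return wrapper

def enforce(functions):
    for dependency, func_keys in REGISTRY.items():
        # Treat "importable module" as the dependency test
        if importlib.util.find_spec(dependency) is None:
            for key in func_keys:
                functions.pop(key, None)  # unload functions with unmet deps
    return functions

@depends('json')        # stdlib module: present, function is kept
def to_json(obj): ...

@depends('nosuchmod')   # missing module: function is dropped
def exotic(): ...

funcs = {f.__qualname__: f for f in (to_json, exotic)}
print(sorted(enforce(funcs)))  # ['to_json']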