Dataset columns: Unnamed: 0 (int64, values 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k).
7,900
def _position_encoding_init(max_length, dim): position_enc = np.arange(max_length).reshape((-1, 1)) \ / (np.power(10000, (2. / dim) * np.arange(dim).reshape((1, -1)))) position_enc[:, 0::2] = np.sin(position_enc[:, 0::2]) position_enc[:, 1::2] = np.cos(position_enc[:, 1::2]) return position_enc
Initialize the sinusoidal position encoding table.
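A minimal usage sketch for the position-encoding helper above, assuming numpy is imported as np (the table shape and the sin/cos column layout follow directly from the code):

import numpy as np

# Build a sinusoidal table for sequences up to length 50 with 16-dim embeddings.
table = _position_encoding_init(max_length=50, dim=16)
print(table.shape)                        # (50, 16)
# Even columns hold sin components, odd columns hold cos components,
# so at position 0 the sin entries are 0 and the cos entries are 1.
print(np.allclose(table[0, 0::2], 0.0))   # True
print(np.allclose(table[0, 1::2], 1.0))   # True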
7,901
def get_nift_values() -> Mapping[str, str]: r = get_bel_resource(NIFT) return { name.lower(): name for name in r[] }
Extract the list of NIFT names from the BEL resource and build a dictionary mapping from the lowercased version to the uppercase version.
7,902
def redo(self, channel, image): chname = channel.name if image is None: return imname = image.get(, ) iminfo = channel.get_image_info(imname) timestamp = iminfo.time_modified if timestamp is None: reason = iminfo.get(, None) if reason is not None: self.fv.show_error( "{0} invoked callback to ChangeHistory with a " "reason but without a timestamp. The plugin invoking the " "callback is no longer be compatible with Ginga. " "Please contact plugin developer to update the plugin " "to use self.fv.update_image_info() like Mosaic " "plugin.".format(imname)) self.remove_image_info_cb(self.fv, channel, iminfo) return self.add_entry(chname, iminfo)
Add an entry with image modification info.
7,903
def attr_string(filterKeys=(), filterValues=(), **kwargs): return .join([str(k)++repr(v) for k, v in kwargs.items() if k not in filterKeys and v not in filterValues])
Build a string consisting of 'key=value' substrings for each keyword argument in :kwargs: @param filterKeys: list of key names to ignore @param filterValues: list of values to ignore (e.g. None will ignore all key=value pairs that have that value).
7,904
def purge_tokens(self, input_token_attrs=None): if input_token_attrs is None: remove_attrs = self.token_attrs else: remove_attrs = [token_attr for token_attr in self.token_attrs if token_attr.token in input_token_attrs] self.token_attrs = [token_attr for token_attr in self.token_attrs if token_attr not in remove_attrs]
Removes all specified token_attrs that exist in instance.token_attrs :param token_attrs: list(str), list of string values of tokens to remove. If None, removes all
7,905
def fetch_and_index(self, fetch_func): "Fetch data with func, return dict indexed by ID" data, e = fetch_func() if e: raise e yield {row[]: row for row in data}
Fetch data with func, return dict indexed by ID
7,906
def search_all(self, quota=50, format=): limit quota_left = quota results = [] while quota_left > 0: more_results = self._search(quota_left, format) if not more_results: break results += more_results quota_left = quota_left - len(more_results) time.sleep(1) results = results[0:quota] return results
Returns a single list containing up to 'quota' Result objects. Will keep requesting until the quota is met. Will also truncate extra results to return exactly the given quota.
7,907
def keyPressEvent(self, event): if self.useDefaultKeystrokes() and self.isEditable(): if event.key() == Qt.Key_Delete: for item in self.selectedItems(): item.setRecordState(XOrbRecordItem.State.Removed) elif event.key() == Qt.Key_S and\ event.modifiers() == Qt.ControlModifier: self.commit() super(XOrbTreeWidget, self).keyPressEvent(event)
Listen for the delete key and check to see if this should auto set the remove property on the object. :param event | <QKeyPressEvent>
7,908
def _get_log_entries(self) -> List[Tuple[int, bytes, List[int], bytes]]: if self.is_error: return [] else: return sorted(itertools.chain( self._log_entries, *(child._get_log_entries() for child in self.children) ))
Return the log entries for this computation and its children. They are sorted in the same order they were emitted during the transaction processing, and include the sequential counter as the first element of the tuple representing every entry.
7,909
def process_module(self, node): if self.config.file_header: if sys.version_info[0] < 3: pattern = re.compile( + self.config.file_header, re.LOCALE | re.MULTILINE) else: pattern = re.compile( + self.config.file_header, re.MULTILINE) content = None with node.stream() as stream: content = stream.read().decode() matches = pattern.findall(content) if len(matches) != 1: self.add_message(, 1, args=self.config.file_header)
Process the astroid node stream.
7,910
def validate_cmap(val): from matplotlib.colors import Colormap try: return validate_str(val) except ValueError: if not isinstance(val, Colormap): raise ValueError( "Could not find a valid colormap!") return val
Validate a colormap Parameters ---------- val: str or :class:`mpl.colors.Colormap` Returns ------- str or :class:`mpl.colors.Colormap` Raises ------ ValueError
7,911
def f(x, depth1, depth2, dim=, first_batch_norm=True, stride=1, training=True, bottleneck=True, padding=): conv = CONFIG[dim][] with tf.variable_scope(, reuse=tf.AUTO_REUSE): if first_batch_norm: net = tf.layers.batch_normalization(x, training=training) net = tf.nn.relu(net) else: net = x if bottleneck: net = conv(net, depth1, 1, strides=stride, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth1, 3, strides=1, padding=padding, activation=None) net = tf.layers.batch_normalization(net, training=training) net = tf.nn.relu(net) net = conv(net, depth2, 1, strides=1, padding=padding, activation=None) else: net = conv(net, depth2, 3, strides=stride, padding=padding, activation=None) net = tf.layers.batch_normalization(x, training=training) net = tf.nn.relu(net) net = conv(net, depth2, 3, strides=stride, padding=padding, activation=None) return net
Applies residual function for RevNet. Args: x: input tensor depth1: Number of output channels for the first and second conv layers. depth2: Number of output channels for the third conv layer. dim: '2d' if 2-dimensional, '3d' if 3-dimensional. first_batch_norm: Whether to keep the first batch norm layer or not. Typically used in the first RevNet block. stride: Stride for the first conv filter. Note that this particular RevNet architecture only varies the stride for the first conv filter. The stride for the second conv filter is always set to 1. training: True for train phase, False for eval phase. bottleneck: If true, apply bottleneck 1x1 down/up sampling. padding: Padding for each conv layer. Returns: Output tensor after applying residual function for RevNet.
7,912
def build(image, build_path, tag=None, build_args=None, fromline=None, args=[]): if tag: image = ":".join([image, tag]) bdir = tempfile.mkdtemp() os.system(.format(build_path, bdir)) if build_args: stdw = tempfile.NamedTemporaryFile(dir=bdir, mode=) with open("{}/Dockerfile".format(bdir)) as std: dfile = std.readlines() for line in dfile: if fromline and line.lower().startswith(): stdw.write(.format(fromline)) elif line.lower().startswith("cmd"): for arg in build_args: stdw.write(arg+"\n") stdw.write(line) else: stdw.write(line) stdw.flush() utils.xrun("docker build", args+["--force-rm","-f", stdw.name, "-t", image, bdir]) stdw.close() else: utils.xrun("docker build", args+["--force-rm", "-t", image, bdir]) os.system(.format(bdir))
Build a Docker image.
7,913
def get_initial_arguments(request, cache_id=None): if cache_id is None: return None if initial_argument_location(): return cache.get(cache_id) return request.session[cache_id]
Extract initial arguments for the dash app
7,914
def get_coord_box(centre_x, centre_y, distance): return { : (centre_x - distance, centre_y + distance), : (centre_x + distance, centre_y + distance), : (centre_x - distance, centre_y - distance), : (centre_x + distance, centre_y - distance), }
Get the square boundary coordinates for a given centre and distance
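The corner-name keys of the returned dictionary were stripped from the cell above during extraction; the sketch below restates the same idea with hypothetical key names (the original string literals may have differed):

def get_coord_box_sketch(centre_x, centre_y, distance):
    # Key names are illustrative only; the original literals were lost.
    return {
        'top_left': (centre_x - distance, centre_y + distance),
        'top_right': (centre_x + distance, centre_y + distance),
        'bottom_left': (centre_x - distance, centre_y - distance),
        'bottom_right': (centre_x + distance, centre_y - distance),
    }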
7,915
def resolve_dependencies(self): return dict( [((key, self.data_dependencies[key]) if type(self.data_dependencies[key]) != DeferredDependency else (key, self.data_dependencies[key].resolve())) for key in self.data_dependencies])
Evaluate each of the data dependencies of this build target and return the resulting dict.
7,916
def plot_color_legend(legend, horizontal=False, ax=None): import matplotlib.pyplot as plt import numpy as np t = np.array([np.array([x for x in legend])]) if ax is None: fig, ax = plt.subplots(1, 1) if horizontal: ax.imshow(t, interpolation=) ax.set_yticks([]) ax.set_xticks(np.arange(0, legend.shape[0])) t = ax.set_xticklabels(legend.index) else: t = t.reshape([legend.shape[0], 1, 3]) ax.imshow(t, interpolation=) ax.set_xticks([]) ax.set_yticks(np.arange(0, legend.shape[0])) t = ax.set_yticklabels(legend.index) return ax
Plot a pandas Series with labels and colors. Parameters ---------- legend : pandas.Series Pandas Series whose values are RGB triples and whose index contains categorical labels. horizontal : bool If True, plot horizontally. ax : matplotlib.axis Axis to plot on. Returns ------- ax : matplotlib.axis Plot axis.
7,917
def handle_legacy_tloc(line: str, position: int, tokens: ParseResults) -> ParseResults: log.log(5, , line, position) return tokens
Handle translocations that lack the ``fromLoc`` and ``toLoc`` entries.
7,918
def check_fam_for_samples(required_samples, source, gold): source_samples = set() with open(source, ) as input_file: for line in input_file: sample = tuple(line.rstrip("\r\n").split(" ")[:2]) if sample in required_samples: source_samples.add(sample) gold_samples = set() with open(gold, ) as input_file: for line in input_file: sample = tuple(line.rstrip("\r\n").split(" ")[:2]) if sample in required_samples: gold_samples.add(sample) logger.info(" - Found {} samples in source panel".format( len(source_samples), )) logger.info(" - Found {} samples in gold standard".format( len(gold_samples), )) if len(required_samples - (source_samples | gold_samples)) != 0: return False else: return True
Check fam files for required_samples.
7,919
def pipe_privateinput(context=None, _INPUT=None, conf=None, **kwargs): value = utils.get_input(context, conf) while True: yield value
An input that prompts the user for some text and yields it forever. Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : unused conf : { 'name': {'value': 'parameter name'}, 'prompt': {'value': 'User prompt'}, 'default': {'value': 'default value'}, 'debug': {'value': 'debug value'} } Yields ------ _OUTPUT : text
7,920
def staticEval(self): for o in self.operands: o.staticEval() self.result._val = self.evalFn()
Recursively statically evaluate the result of this operator.
7,921
def match_score(self, supported: ) -> int: if supported == self: return 100 desired_complete = self.prefer_macrolanguage().maximize() supported_complete = supported.prefer_macrolanguage().maximize() desired_triple = (desired_complete.language, desired_complete.script, desired_complete.region) supported_triple = (supported_complete.language, supported_complete.script, supported_complete.region) return 100 - raw_distance(desired_triple, supported_triple)
Suppose that `self` is the language that the user desires, and `supported` is a language that is actually supported. This method returns a number from 0 to 100 indicating how similar the supported language is (higher numbers are better). This is not a symmetric relation. The algorithm here is described (badly) in a Unicode technical report at http://unicode.org/reports/tr35/#LanguageMatching. If you find these results bothersome, take it up with Unicode, unless it's particular tweaks we implemented such as macrolanguage matching. See :func:`tag_match_score` for a function that works on strings, instead of requiring you to instantiate Language objects first. Further documentation and examples appear with that function.
7,922
def xyzlabel(labelx, labely, labelz): xlabel(labelx) ylabel(labely) zlabel(labelz)
Set all labels at once.
7,923
def lookup(self, topic): nsq.assert_valid_topic_name(topic) return self._request(, , fields={: topic})
Returns producers for a topic.
7,924
def sort(self): self.detections = sorted(self.detections, key=lambda d: d.detect_time) return self
Sort by detection time. .. rubric:: Example >>> family = Family( ... template=Template(name='a'), detections=[ ... Detection(template_name='a', detect_time=UTCDateTime(0) + 200, ... no_chans=8, detect_val=4.2, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0), ... Detection(template_name='a', detect_time=UTCDateTime(0), ... no_chans=8, detect_val=4.5, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0), ... Detection(template_name='a', detect_time=UTCDateTime(0) + 10, ... no_chans=8, detect_val=4.5, threshold=1.2, ... typeofdet='corr', threshold_type='MAD', ... threshold_input=8.0)]) >>> family[0].detect_time UTCDateTime(1970, 1, 1, 0, 3, 20) >>> family.sort()[0].detect_time UTCDateTime(1970, 1, 1, 0, 0)
7,925
def fill_rect(self, rect): check_int_err(lib.SDL_RenderFillRect(self._ptr, rect._ptr))
Fill a rectangle on the current rendering target with the drawing color. Args: rect (Rect): The destination rectangle, or None to fill the entire rendering target. Raises: SDLError: If an error is encountered.
7,926
def persist_booking(booking, user): if booking is not None: existing_bookings = Booking.objects.filter( user=user, booking_status__slug=).exclude( pk=booking.pk) existing_bookings.delete() booking.session = None booking.user = user booking.save()
Ties an in-progress booking from a session to a user when the user logs in. If we don't do this, the booking will be lost, because on a login, the old session will be deleted and a new one will be created. Since the booking has a FK to the session, it would be deleted as well when the user logs in. We assume that a user can only have one booking that is in-progress. Therefore we will delete any existing in-progress bookings of this user before tying the one from the session to the user. TODO: Find a more generic solution for this, as this assumes that there is a status called inprogress and that a user can only have one such booking. :param booking: The booking that should be tied to the user. :user: The user the booking should be tied to.
7,927
def resolve_args(self, args): def resolve(a): if isinstance(a, dict): _id = a.get(, None) if isinstance(a, (list, tuple)): return [resolve(i) for i in a] return a return [resolve(a) for a in args]
Resolve function call arguments that have object ids into instances of these objects
7,928
def get_uris(self, base_uri, filter_list=None): return { re.sub(r, base_uri, link.attrib[]) for link in self.parsedpage.get_nodes_by_selector() if in link.attrib and ( link.attrib[].startswith(base_uri) or link.attrib[].startswith() ) and not is_uri_to_be_filtered(link.attrib[], filter_list) }
Return a set of internal URIs.
7,929
def data(self, data=None): if data is not None: self.response_model.data = data return self.response_model.data
Set the response data if given and return the current response data.
7,930
def generate_field_spec(row): names = set() fields = [] for cell in row: name = column_alias(cell, names) field = { : name, : cell.column, : unicode(cell.type).lower(), : False, : False, : [] } if hasattr(cell.type, ): field[] = field[] = cell.type.format fields.append(field) return fields
Generate a set of metadata for each field/column in the data. This is loosely based on jsontableschema.
7,931
def execute(self, eopatch): for feature_type, feature_name, new_feature_name in self.feature: result = self._compute_hog(eopatch[feature_type][feature_name]) eopatch[feature_type][new_feature_name] = result[0] if self.visualize: eopatch[feature_type][self.visualize_name] = result[1] return eopatch
Execute computation of HoG features on input eopatch :param eopatch: Input eopatch :type eopatch: eolearn.core.EOPatch :return: EOPatch instance with new keys holding the HoG features and HoG image for visualisation. :rtype: eolearn.core.EOPatch
7,932
def findExtname(fimg, extname, extver=None): i = 0 extnum = None for chip in fimg: hdr = chip.header if in hdr: if hdr[].strip() == extname.upper(): if extver is None or hdr[] == extver: extnum = i break i += 1 return extnum
Return the index of the extension whose EXTNAME matches the given name (and, if given, whose EXTVER matches extver).
7,933
def _tidy2xhtml5(html): html = _io2string(html) html = _pre_tidy(html) xhtml5, errors =\ tidy_document(html, options={ : 0, : 1, }) return _post_tidy(xhtml5)
Tidy up a html4/5 soup to a parsable valid XHTML5. Requires tidy-html5 from https://github.com/w3c/tidy-html5 Installation: http://goo.gl/FG27n
7,934
def emboss_pepstats_parser(infile): with open(infile) as f: lines = f.read().split() info_dict = {} for l in lines[38:47]: info = l.split() cleaninfo = list(filter(lambda x: x != , info)) prop = cleaninfo[0] num = cleaninfo[2] percent = float(cleaninfo[-1]) / float(100) info_dict[ + prop.lower() + ] = percent return info_dict
Get dictionary of pepstats results. Args: infile: Path to pepstats outfile Returns: dict: Parsed information from pepstats TODO: Only currently parsing the bottom of the file for percentages of properties.
7,935
def _multiply(self, x1, x2, out): self.tspace._multiply(x1.tensor, x2.tensor, out.tensor)
Raw pointwise multiplication of two elements.
7,936
def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs): assert "noise_shape" not in kwargs if broadcast_dims: shape = tf.shape(x) ndims = len(x.get_shape()) broadcast_dims = [dim + ndims if dim < 0 else dim for dim in broadcast_dims] kwargs["noise_shape"] = [ 1 if i in broadcast_dims else shape[i] for i in range(ndims) ] return tf.nn.dropout(x, keep_prob, **kwargs)
Like tf.nn.dropout but takes broadcast_dims instead of noise_shape. Instead of specifying noise_shape, this function takes broadcast_dims - a list of dimension numbers in which noise_shape should be 1. The random keep/drop tensor has dimensionality 1 along these dimensions. Args: x: a floating point tensor. keep_prob: A scalar Tensor with the same type as x. The probability that each element is kept. broadcast_dims: an optional list of integers the dimensions along which to broadcast the keep/drop flags. **kwargs: keyword arguments to tf.nn.dropout other than "noise_shape". Returns: Tensor of the same shape as x.
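A small worked example of how broadcast_dims becomes noise_shape in the helper above; a sketch assuming the TensorFlow 1.x API used in the snippet, and only graph construction is exercised:

import tensorflow as tf  # TF 1.x style, as in the function above

x = tf.zeros([8, 10, 64])  # (batch, length, depth)
# broadcast_dims=[-1] is normalized to dimension 2, so noise_shape becomes
# [8, 10, 1]: one keep/drop decision is shared across the whole depth of
# each position instead of being drawn per element.
y = dropout_with_broadcast_dims(x, keep_prob=0.9, broadcast_dims=[-1])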
7,937
def _main(): print( % get_platform()) print( % get_python_version()) print( % _get_default_scheme()) print() _print_dict(, get_paths()) print() _print_dict(, get_config_vars())
Display all information sysconfig contains.
7,938
def extension_by_source(source, mime_type): "Return the file extension used by this plugin" extension = source.plugin_name if extension: return extension if mime_type: return mime_type.split("/")[-1]
Return the file extension used by this plugin
7,939
def configure(self, **configs): configs = self._deprecate_configs(**configs) self._config = {} for key in self.DEFAULT_CONFIG: self._config[key] = configs.pop(key, self.DEFAULT_CONFIG[key]) if configs: raise KafkaConfigurationError( + str(list(configs.keys()))) if self._config[]: if not self._config[]: raise KafkaConfigurationError( ) if self._config[]: logger.info("Configuring consumer to auto-commit offsets") self._reset_auto_commit() if not self._config[]: raise KafkaConfigurationError( ) self._client = KafkaClient( self._config[], client_id=self._config[], timeout=(self._config[] / 1000.0) )
Configure the consumer instance Configuration settings can be passed to constructor, otherwise defaults will be used: Keyword Arguments: bootstrap_servers (list): List of initial broker nodes the consumer should contact to bootstrap initial cluster metadata. This does not have to be the full node list. It just needs to have at least one broker that will respond to a Metadata API Request. client_id (str): a unique name for this client. Defaults to 'kafka.consumer.kafka'. group_id (str): the name of the consumer group to join, Offsets are fetched / committed to this group name. fetch_message_max_bytes (int, optional): Maximum bytes for each topic/partition fetch request. Defaults to 1024*1024. fetch_min_bytes (int, optional): Minimum amount of data the server should return for a fetch request, otherwise wait up to fetch_wait_max_ms for more data to accumulate. Defaults to 1. fetch_wait_max_ms (int, optional): Maximum time for the server to block waiting for fetch_min_bytes messages to accumulate. Defaults to 100. refresh_leader_backoff_ms (int, optional): Milliseconds to backoff when refreshing metadata on errors (subject to random jitter). Defaults to 200. socket_timeout_ms (int, optional): TCP socket timeout in milliseconds. Defaults to 30*1000. auto_offset_reset (str, optional): A policy for resetting offsets on OffsetOutOfRange errors. 'smallest' will move to the oldest available message, 'largest' will move to the most recent. Any ofther value will raise the exception. Defaults to 'largest'. deserializer_class (callable, optional): Any callable that takes a raw message value and returns a deserialized value. Defaults to lambda msg: msg. auto_commit_enable (bool, optional): Enabling auto-commit will cause the KafkaConsumer to periodically commit offsets without an explicit call to commit(). Defaults to False. auto_commit_interval_ms (int, optional): If auto_commit_enabled, the milliseconds between automatic offset commits. Defaults to 60 * 1000. auto_commit_interval_messages (int, optional): If auto_commit_enabled, a number of messages consumed between automatic offset commits. Defaults to None (disabled). consumer_timeout_ms (int, optional): number of millisecond to throw a timeout exception to the consumer if no message is available for consumption. Defaults to -1 (dont throw exception). Configuration parameters are described in more detail at http://kafka.apache.org/documentation.html#highlevelconsumerapi
7,940
def update_datetime(value, range = None): range = range if range != None else 10 if range < 0: return value days = RandomFloat.next_float(-range, range) return value + datetime.timedelta(days)
Updates (drifts) a Date value within the specified range. :param value: a Date value to drift. :param range: (optional) a range in days. Default: 10 days. :return: an updated DateTime value.
7,941
def _from_dict(cls, _dict): args = {} if in _dict: args[] = [ SpeechRecognitionResult._from_dict(x) for x in (_dict.get()) ] if in _dict: args[] = _dict.get() if in _dict: args[] = [ SpeakerLabelsResult._from_dict(x) for x in (_dict.get()) ] if in _dict: args[] = _dict.get() return cls(**args)
Initialize a SpeechRecognitionResults object from a json dictionary.
7,942
def match_value_to_text(self, text): if self.nme in text: res = 0.8 else: res = 0.2 return self.nme + + str(res) + + text
this is going to be the tricky bit - probably not possible to get the 'exact' rating for a value. Will need to do sentiment analysis of the text to see how it matches the rating. Even that sounds like it won't work - maybe an ML algorithm would do it, but that requires a large body of text already matched to values - and values aren't even defined as far as I have found. UPDATE - this could work if we assume values can be single words, eg tax=0.3, freedom=0.7, healthcare=0.3, welfare=0.3 etc
7,943
def match(record, config=None): if config is None: current_app.logger.debug() config = current_app.config[] try: algorithm, doc_type, index = config[], config[], config[] except KeyError as e: raise KeyError( % repr(e)) source = config.get(, []) match_deleted = config.get(, False) collections = config.get() if not (collections is None or ( isinstance(collections, (list, tuple)) and all(isinstance(collection, string_types) for collection in collections) )): raise ValueError( % repr(collections)) for i, step in enumerate(algorithm): try: queries = step[] except KeyError: raise KeyError( % i) validator = _get_validator(step.get()) for j, query in enumerate(queries): try: body = compile(query, record, collections=collections, match_deleted=match_deleted) except Exception as e: raise ValueError( % (j, i, repr(e))) if not body: continue current_app.logger.debug( % repr(body)) if source: result = es.search(index=index, doc_type=doc_type, body=body, _source=source) else: result = es.search(index=index, doc_type=doc_type, body=body) for hit in result[][]: if validator(record, hit): yield hit
Given a record, yield the records in INSPIRE most similar to it. This method can be used to detect if a record that we are ingesting as a submission or as a harvest is already present in the system, or to find out which record a reference should be pointing to.
7,944
def bool_assignment(arg, patterns=None): arg = str(arg) try: if patterns is None: patterns = ( (re.compile(r, flags=re.IGNORECASE), lambda x: x.lower() == ), (re.compile(r, flags=re.IGNORECASE), lambda x: x.lower() == ), (re.compile(r, flags=re.IGNORECASE), lambda x: x.lower() == ) ) if not arg: return else: for pattern, func in patterns: if pattern.match(arg): return func(arg) except Exception as e: raise e
Summary: Enforces correct bool argument assignment Arg: :arg (*): arg which must be interpreted as either bool True or False Returns: bool assignment | TYPE: bool
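The regex pattern literals in the cell above were stripped during extraction, so the function as shown cannot run. The sketch below keeps the same structure with assumed patterns; the original literals may have differed:

import re

def bool_assignment_sketch(arg):
    # Patterns here are assumptions standing in for the stripped originals.
    patterns = (
        (re.compile(r'^(true|false)$', flags=re.IGNORECASE), lambda x: x.lower() == 'true'),
        (re.compile(r'^(yes|no)$', flags=re.IGNORECASE), lambda x: x.lower() == 'yes'),
        (re.compile(r'^(1|0)$', flags=re.IGNORECASE), lambda x: x == '1'),
    )
    arg = str(arg)
    if not arg:
        return None
    for pattern, func in patterns:
        if pattern.match(arg):
            return func(arg)
    return None

print(bool_assignment_sketch('Yes'))   # True
print(bool_assignment_sketch('0'))     # False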
7,945
def fix_reference_url(url): new_url = url new_url = fix_url_bars_instead_of_slashes(new_url) new_url = fix_url_add_http_if_missing(new_url) new_url = fix_url_replace_tilde(new_url) try: rfc3987.parse(new_url, rule="URI") return new_url except ValueError: return url
Used to parse an incorrect url and try to fix it for the most common kinds of errors. Returns: String containing the fixed url, or the original one if it could not be fixed.
7,946
def _browse(c): index = join(c.sphinx.target, c.sphinx.target_file) c.run("open {0}".format(index))
Open build target's index.html in a browser (using 'open').
7,947
def _prepare_script(self, dest_dir, program): script_name = ExecutorFiles.PROCESS_SCRIPT dest_file = os.path.join(dest_dir, script_name) with open(dest_file, ) as dest_file_obj: dest_file_obj.write(program) os.chmod(dest_file, 0o700) return script_name
Copy the script into the destination directory. :param dest_dir: The target directory where the script will be saved. :param program: The script text to be saved. :return: The name of the script file. :rtype: str
7,948
def mclennan_tourky(g, init=None, epsilon=1e-3, max_iter=200, full_output=False): r try: N = g.N except: raise TypeError() if N < 2: raise NotImplementedError() if init is None: init = (0,) * N try: l = len(init) except TypeError: raise TypeError() if l != N: raise ValueError( .format(N=N) ) indptr = np.empty(N+1, dtype=int) indptr[0] = 0 indptr[1:] = np.cumsum(g.nums_actions) x_init = _flatten_action_profile(init, indptr) is_approx_fp = lambda x: _is_epsilon_nash(x, g, epsilon, indptr) x_star, converged, num_iter = \ _compute_fixed_point_ig(_best_response_selection, x_init, max_iter, verbose=0, print_skip=1, is_approx_fp=is_approx_fp, g=g, indptr=indptr) NE = _get_action_profile(x_star, indptr) if not full_output: return NE res = NashResult(NE=NE, converged=converged, num_iter=num_iter, max_iter=max_iter, init=init, epsilon=epsilon) return NE, res
r""" Find one mixed-action epsilon-Nash equilibrium of an N-player normal form game by the fixed point computation algorithm by McLennan and Tourky [1]_. Parameters ---------- g : NormalFormGame NormalFormGame instance. init : array_like(int or array_like(float, ndim=1)), optional Initial action profile, an array of N objects, where each object must be an iteger (pure action) or an array of floats (mixed action). If None, default to an array of zeros (the zero-th action for each player). epsilon : scalar(float), optional(default=1e-3) Value of epsilon-optimality. max_iter : scalar(int), optional(default=100) Maximum number of iterations. full_output : bool, optional(default=False) If False, only the computed Nash equilibrium is returned. If True, the return value is `(NE, res)`, where `NE` is the Nash equilibrium and `res` is a `NashResult` object. Returns ------- NE : tuple(ndarray(float, ndim=1)) Tuple of computed Nash equilibrium mixed actions. res : NashResult Object containing information about the computation. Returned only when `full_output` is True. See `NashResult` for details. Examples -------- Consider the following version of 3-player "anti-coordination" game, where action 0 is a safe action which yields payoff 1, while action 1 yields payoff :math:`v` if no other player plays 1 and payoff 0 otherwise: >>> N = 3 >>> v = 2 >>> payoff_array = np.empty((2,)*n) >>> payoff_array[0, :] = 1 >>> payoff_array[1, :] = 0 >>> payoff_array[1].flat[0] = v >>> g = NormalFormGame((Player(payoff_array),)*N) >>> print(g) 3-player NormalFormGame with payoff profile array: [[[[ 1., 1., 1.], [ 1., 1., 2.]], [[ 1., 2., 1.], [ 1., 0., 0.]]], [[[ 2., 1., 1.], [ 0., 1., 0.]], [[ 0., 0., 1.], [ 0., 0., 0.]]]] This game has a unique symmetric Nash equilibrium, where the equilibrium action is given by :math:`(p^*, 1-p^*)` with :math:`p^* = 1/v^{1/(N-1)}`: >>> p_star = 1/(v**(1/(N-1))) >>> [p_star, 1 - p_star] [0.7071067811865475, 0.29289321881345254] Obtain an approximate Nash equilibrium of this game by `mclennan_tourky`: >>> epsilon = 1e-5 # Value of epsilon-optimality >>> NE = mclennan_tourky(g, epsilon=epsilon) >>> print(NE[0], NE[1], NE[2], sep='\n') [ 0.70710754 0.29289246] [ 0.70710754 0.29289246] [ 0.70710754 0.29289246] >>> g.is_nash(NE, tol=epsilon) True Additional information is returned if `full_output` is set True: >>> NE, res = mclennan_tourky(g, epsilon=epsilon, full_output=True) >>> res.converged True >>> res.num_iter 18 References ---------- .. [1] A. McLennan and R. Tourky, "From Imitation Games to Kakutani," 2006.
7,949
def stage_all(self): LOGGER.info() self.repo.git.add(A=True)
Stages all changed and untracked files
7,950
def init(): loop = asyncio.get_event_loop() if loop.is_running(): raise Exception("You must initialize the Ray async API by calling " "async_api.init() or async_api.as_future(obj) before " "the event loop starts.") else: asyncio.get_event_loop().run_until_complete(_async_init())
Initialize synchronously.
7,951
def from_api(cls, api): ux = TodoUX(api) from .pseudorpc import PseudoRpc rpc = PseudoRpc(api) return cls({ViaAPI: api, ViaUX: ux, ViaRPC: rpc})
Create an application description for the todo app that, based on the api, can use either the api or the ux for interaction.
7,952
def describe_field(k, v, timestamp_parser=default_timestamp_parser): def bq_schema_field(name, bq_type, mode): return {"name": name, "type": bq_type, "mode": mode} if isinstance(v, list): if len(v) == 0: raise Exception( "Canfields'] = schema_from_record(v, timestamp_parser) except InvalidTypeException as e: raise InvalidTypeException("%s.%s" % (k, e.key), e.value) return field
Given a key representing a column name and value representing the value stored in the column, return a representation of the BigQuery schema element describing that field. Raise errors if invalid value types are provided. Parameters ---------- k : Union[str, unicode] Key representing the column v : Union[str, unicode, int, float, datetime, object] Value mapped to by `k` Returns ------- object Describing the field Raises ------ Exception If invalid value types are provided. Examples -------- >>> describe_field("username", "Bob") {"name": "username", "type": "string", "mode": "nullable"} >>> describe_field("users", [{"username": "Bob"}]) {"name": "users", "type": "record", "mode": "repeated", "fields": [{"name":"username","type":"string","mode":"nullable"}]}
7,953
def urlretrieve(url, filename=None, reporthook=None, data=None): url_type, path = splittype(url) with contextlib.closing(urlopen(url, data)) as fp: headers = fp.info() if url_type == "file" and not filename: return os.path.normpath(path), headers if filename: tfp = open(filename, ) else: tfp = tempfile.NamedTemporaryFile(delete=False) filename = tfp.name _url_tempfiles.append(filename) with tfp: result = filename, headers bs = 1024*8 size = -1 read = 0 blocknum = 0 if "content-length" in headers: size = int(headers["Content-Length"]) if reporthook: reporthook(blocknum, bs, size) while True: block = fp.read(bs) if not block: break read += len(block) tfp.write(block) blocknum += 1 if reporthook: reporthook(blocknum, bs, size) if size >= 0 and read < size: raise ContentTooShortError( "retrieval incomplete: got only %i out of %i bytes" % (read, size), result) return result
Retrieve a URL into a temporary location on disk. Requires a URL argument. If a filename is passed, it is used as the temporary file location. The reporthook argument should be a callable that accepts a block number, a read size, and the total file size of the URL target. The data argument should be valid URL encoded data. If a filename is passed and the URL points to a local resource, the result is a copy from local file to new file. Returns a tuple containing the path to the newly created data file as well as the resulting HTTPMessage object.
7,954
def restore(self): if not self._snapshot: return yield from self.set_muted(self._snapshot[]) yield from self.set_volume(self._snapshot[]) yield from self.set_stream(self._snapshot[]) self.callback() _LOGGER.info(, self.friendly_name)
Restore snapshotted state.
7,955
def to_XML(self, xml_declaration=True, xmlns=True): root_node = self._to_DOM() if xmlns: xmlutils.annotate_with_XMLNS(root_node, OBSERVATION_XMLNS_PREFIX, OBSERVATION_XMLNS_URL) return xmlutils.DOM_node_to_XML(root_node, xml_declaration)
Dumps object fields to an XML-formatted string. The 'xml_declaration' switch enables printing of a leading standard XML line containing XML version and encoding. The 'xmlns' switch enables printing of qualified XMLNS prefixes. :param XML_declaration: if ``True`` (default) prints a leading XML declaration line :type XML_declaration: bool :param xmlns: if ``True`` (default) prints full XMLNS prefixes :type xmlns: bool :returns: an XML-formatted string
7,956
def include(self, spec, *, basePath=None, operationId_mapping=None, name=None): data = self._file_loader.load(spec) if basePath is None: basePath = data.get(, ) if name is not None: d = dict(data) d[] = basePath self._swagger_data[name] = d swagger_data = {k: v for k, v in data.items() if k != } swagger_data[] = basePath for url, methods in data.get(, {}).items(): url = basePath + url methods = dict(methods) location_name = methods.pop(self.NAME, None) parameters = methods.pop(, []) for method, body in methods.items(): if method == self.VIEW: view = utils.import_obj(body) view.add_routes(self, prefix=url, encoding=self._encoding) continue body = dict(body) if parameters: body[] = parameters + \ body.get(, []) handler = body.pop(self.HANDLER, None) name = location_name or handler if not handler: op_id = body.get() if op_id and operationId_mapping: handler = operationId_mapping.get(op_id) if handler: name = location_name or op_id if handler: validate = body.pop(self.VALIDATE, self._default_validate) self.add_route( method.upper(), utils.url_normolize(url), handler=handler, name=name, swagger_data=body, validate=validate, ) self._swagger_data[basePath] = swagger_data for route in self.routes(): if isinstance(route, SwaggerRoute) and not route.is_built: route.build_swagger_data(self._file_loader)
Adds a new specification to a router :param spec: path to specification :param basePath: override the base path specified in the specification :param operationId_mapping: mapping for handlers :param name: name to access original spec
7,957
def _get_u16(self, msb, lsb): buf = struct.pack(, self._get_u8(msb), self._get_u8(lsb)) return int(struct.unpack(, buf)[0])
Convert 2 bytes into an unsigned int.
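The struct format strings were stripped from the cell above; below is a sketch of the underlying idea, assuming the most-significant byte comes first (big-endian):

import struct

def get_u16_sketch(msb, lsb):
    # Pack the two bytes, then reinterpret them as one big-endian unsigned short.
    buf = struct.pack('BB', msb, lsb)
    return struct.unpack('>H', buf)[0]

# Equivalent bit-twiddling form:
assert get_u16_sketch(0x12, 0x34) == (0x12 << 8) | 0x34  # 0x1234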
7,958
def validate_param_name(name, param_type): if not re.match(r, name): raise ValueError( % (param_type, name))
Validate that the name follows posix conventions for env variables.
7,959
def _set_adj_type(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u: {: 8}, u: {: 2}, u: {: 4}, u: {: 1}, u: {: 0}, u: {: 16}},), is_leaf=True, yang_name="adj-type", rest_name="adj-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace=, defining_module=, yang_type=, is_config=False) except (TypeError, ValueError): raise ValueError({ : , : "brocade-isis-operational:isis-adj-type", : , }) self.__adj_type = t if hasattr(self, ): self._set()
Setter method for adj_type, mapped from YANG variable /adj_neighbor_entries_state/adj_neighbor/adj_type (isis-adj-type) If this variable is read-only (config: false) in the source YANG file, then _set_adj_type is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_adj_type() directly. YANG Description: Type of ISIS Adjacency
7,960
def purge(self): while not self.stopped.isSet(): self.stopped.wait(timeout=defines.EXCHANGE_LIFETIME) self._messageLayer.purge()
Clean old transactions
7,961
def build_authorization_arg(authdict): vallist = [] for k in authdict.keys(): vallist += [ % (k,authdict[k])] return +.join(vallist)
Create an "Authorization" header value from an authdict (created by generate_response()).
7,962
def error(code, message, **kwargs): assert code in Logger._error_code_to_exception exc_type, domain = Logger._error_code_to_exception[code] exc = exc_type(message, **kwargs) Logger._log(code, exc.message, ERROR, domain) raise exc
Call this to raise an exception and have it stored in the journal
7,963
def register_view(self, view): super(TopToolBarUndockedWindowController, self).register_view(view) view[].connect(, self.on_redock_button_clicked)
Called when the View was registered
7,964
def _render_content(self, content, **settings): result = [] columns = settings[self.SETTING_COLUMNS] (columns, content) = self.table_format(columns, content) if settings[self.SETTING_FLAG_ENUMERATE]: (columns, content) = self.table_enumerate(columns, content) dimensions = self.table_measure(columns, content) sb = {k: settings[k] for k in (self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)} result.append(self.fmt_border(dimensions, , **sb)) if settings[self.SETTING_FLAG_HEADER]: s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN, self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)} s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_HEADER_FORMATING] result.append(self.fmt_row_header(columns, dimensions, **s)) result.append(self.fmt_border(dimensions, , **sb)) for row in content: s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN, self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)} s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_TEXT_FORMATING] result.append(self.fmt_row(columns, dimensions, row, **s)) result.append(self.fmt_border(dimensions, , **sb)) return result
Perform widget rendering, but do not print anything.
7,965
def to_cell_table(self, merged=True): new_rows = [] for row_index, row in enumerate(self.rows(CellMode.cooked)): new_row = [] for col_index, cell_value in enumerate(row): new_row.append(Cell(cell_value, self.get_note((col_index, row_index)))) new_rows.append(new_row) if merged: for cell_low, cell_high in self.merged_cell_ranges(): anchor_cell = new_rows[cell_low[1]][cell_low[0]] for row_index in range(cell_low[1], cell_high[1]): for col_index in range(cell_low[0], cell_high[0]): try: new_rows[row_index][col_index] = anchor_cell.copy() except IndexError: pass return new_rows
Returns a list of lists of Cells with the cooked value and note for each cell.
7,966
def convert_dcm2nii(input_dir, output_dir, filename): if not op.exists(input_dir): raise IOError(.format(input_dir)) if not op.exists(output_dir): raise IOError(.format(output_dir)) tmpdir = tempfile.TemporaryDirectory(prefix=) arguments = .format(tmpdir.name) try: call_out = call_dcm2nii(input_dir, arguments) except: raise else: log.info(.format(input_dir)) filenames = glob(op.join(tmpdir.name, )) cleaned_filenames = remove_dcm2nii_underprocessed(filenames) filepaths = [] for srcpath in cleaned_filenames: dstpath = op.join(output_dir, filename) realpath = copy_w_plus(srcpath, dstpath) filepaths.append(realpath) basename = op.basename(remove_ext(srcpath)) aux_files = set(glob(op.join(tmpdir.name, .format(basename)))) - \ set(glob(op.join(tmpdir.name, .format(basename)))) for aux_file in aux_files: aux_dstpath = copy_w_ext(aux_file, output_dir, remove_ext(op.basename(realpath))) filepaths.append(aux_dstpath) return filepaths
Call MRICron's `dcm2nii` to convert the DICOM files inside `input_dir` to Nifti and save the Nifti file in `output_dir` with a `filename` prefix. Parameters ---------- input_dir: str Path to the folder that contains the DICOM files output_dir: str Path to the folder where to save the NifTI file filename: str Output file basename Returns ------- filepaths: list of str List of file paths created in `output_dir`.
7,967
def main(**options): application = Application(**options) if not application.run(): sys.exit(1) return application
Spline loc tool.
7,968
def list(context, sort, limit, where, verbose): result = product.list(context, sort=sort, limit=limit, where=where) utils.format_output(result, context.format, verbose=verbose)
list(context, sort, limit, where, verbose) List all products. >>> dcictl product list :param string sort: Field to apply sort :param integer limit: Max number of rows to return :param string where: An optional filter criteria :param boolean verbose: Display verbose output
7,969
def write_block_data(self, addr, cmd, vals): self._set_addr(addr) data = ffi.new("union i2c_smbus_data *") list_to_smbus_data(data, vals) if SMBUS.i2c_smbus_access(self._fd, int2byte(SMBUS.I2C_SMBUS_WRITE), ffi.cast("__u8", cmd), SMBUS.I2C_SMBUS_BLOCK_DATA, data): raise IOError(ffi.errno)
write_block_data(addr, cmd, vals) Perform SMBus Write Block Data transaction.
7,970
def _direct_set(self, key, value): dict.__setitem__(self, key, value) return value
_direct_set - INTERNAL USE ONLY!!!! Directly sets a value on the underlying dict, without running through the setitem logic
7,971
def shuffle_into_deck(self): return self.game.cheat_action(self, [actions.Shuffle(self.controller, self)])
Shuffle the card into the controller's deck
7,972
def to_json(data): return json.dumps(data, default=lambda x: x.__dict__, sort_keys=True, indent=4)
Return data as a JSON string.
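A quick usage sketch for the serializer above: any object exposing __dict__ is handled by the default hook, and keys come back sorted with 4-space indentation.

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

# The Point instance is not natively JSON-serializable, so the default hook
# substitutes its __dict__, producing {"x": 0, "y": 0}.
print(to_json({'origin': Point(0, 0), 'label': 'demo'}))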
7,973
def metadata_updated_on(item): ts = item[][0][] ts = str_to_datetime(ts) ts = ts.replace(tzinfo=dateutil.tz.tzutc()) return ts.timestamp()
Extracts and converts the update time from a Bugzilla item. The timestamp is extracted from the 'delta_ts' field. This date is converted to UNIX timestamp format. Because Bugzilla servers ignore the timezone on HTTP requests, it will be ignored during the conversion, too. :param item: item generated by the backend :returns: a UNIX timestamp
7,974
def private_method(func): def func_wrapper(*args, **kwargs): outer_frame = inspect.stack()[1][0] if not in outer_frame.f_locals or outer_frame.f_locals[] is not args[0]: raise RuntimeError( % (args[0].__class__.__name__, func.__name__)) return func(*args, **kwargs) return func_wrapper
Decorator for making an instance method private.
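A usage sketch for the decorator above, assuming the stripped frame-lookup key was 'self': the decorated method works when called from another method of the same instance, and raises RuntimeError when called from outside.

class Account:
    @private_method
    def _audit(self):
        return 'ok'

    def check(self):
        # The caller frame here has a matching 'self', so the call is allowed.
        return self._audit()

acct = Account()
print(acct.check())   # 'ok'
acct._audit()         # raises RuntimeError: called from outside the instance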
7,975
def _add_versions(samples): samples[0]["versions"] = {"tools": programs.write_versions(samples[0]["dirs"], samples[0]["config"]), "data": provenancedata.write_versions(samples[0]["dirs"], samples)} return samples
Add tool and data versions to the summary.
7,976
def _sibpath(path, sibling): return os.path.join(os.path.dirname(os.path.abspath(path)), sibling)
Return the path to a sibling of a file in the filesystem. This is useful in conjunction with the special C{__file__} attribute that Python provides for modules, so modules can load associated resource files. (Stolen from twisted.python.util)
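A tiny usage sketch (the file layout is hypothetical):

# Inside a module located at /home/user/pkg/module.py:
config_path = _sibpath(__file__, 'settings.ini')
# config_path -> '/home/user/pkg/settings.ini'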
7,977
def convert_concat(params, w_name, scope_name, inputs, layers, weights, names): print() concat_nodes = [layers[i] for i in inputs] if len(concat_nodes) == 1: layers[scope_name] = concat_nodes[0] return if names == : tf_name = + random_string(5) elif names == : tf_name = w_name else: tf_name = w_name + str(random.random()) cat = keras.layers.Concatenate(name=tf_name, axis=params[]) layers[scope_name] = cat(concat_nodes)
Convert concatenation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
7,978
def fit(self, X, y=None, sample_weight=None): if self.normalize: X = normalize(X) random_state = check_random_state(self.random_state) self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = spherical_k_means( X, n_clusters=self.n_clusters, sample_weight=sample_weight, init=self.init, n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose, tol=self.tol, random_state=random_state, copy_x=self.copy_x, n_jobs=self.n_jobs, return_n_iter=True, ) return self
Compute k-means clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) y : Ignored not used, present here for API consistency by convention. sample_weight : array-like, shape (n_samples,), optional The weights for each observation in X. If None, all observations are assigned equal weight (default: None)
7,979
def get_lines(self): with open(self.path, "r") as data: self.lines = data.readlines() return self.lines
Gets lines in file :return: Lines in file
7,980
def reset(self, total_size=None): self.root = FakeDirectory(self.path_separator, filesystem=self) self.cwd = self.root.name self.open_files = [] self._free_fd_heap = [] self._last_ino = 0 self._last_dev = 0 self.mount_points = {} self.add_mount_point(self.root.name, total_size) self._add_standard_streams()
Remove all file system contents and reset the root.
7,981
def build_path(G, node, endpoints, path): for successor in G.successors(node): if successor not in path: path.append(successor) if successor not in endpoints: path = build_path(G, successor, endpoints, path) else: path.append(path[0]) return path
Recursively build a path of nodes until you hit an endpoint node. Parameters ---------- G : networkx multidigraph node : int the current node to start from endpoints : set the set of all nodes in the graph that are endpoints path : list the list of nodes in order in the path so far Returns ------- paths_to_simplify : list
7,982
def operates_on(self, qubits: Iterable[raw_types.Qid]) -> bool: return any(q in qubits for q in self.qubits)
Determines if the moment has operations touching the given qubits. Args: qubits: The qubits that may or may not be touched by operations. Returns: Whether this moment has operations involving the qubits.
7,983
def nested_genobject(self, metadata, attr, datastore): for key, value in sorted(datastore[attr].datastore.items()): if in str(type(value)): metadata[attr][key] = dict() for nested_key, nested_datastore in sorted(value.datastore.items()): metadata[attr][key][nested_key] = dict() if in str(type(nested_datastore)): metadata[attr][key].update( self.nested_genobject(metadata[attr][key], nested_key, value.datastore)) else: metadata[attr][key][nested_key] = nested_datastore else: try: if key not in self.unwanted_keys: metadata[attr][key] = value except AttributeError: print(, attr) return metadata
Allow for the printing of nested GenObjects :param metadata: Nested dictionary containing the metadata. Will be further populated by this method :param attr: Current attribute being evaluated. Must be a GenObject e.g. sample.general :param datastore: The dictionary of the current attribute. Will be converted to nested dictionaries :return: Updated nested metadata dictionary with all GenObjects safely converted to dictionaries
7,984
def truncate(s, max_len=20, ellipsis=): r if s is None: return None elif isinstance(s, basestring): return s[:min(len(s), max_len)] + ellipsis if len(s) > max_len else elif isinstance(s, Mapping): truncated_str = str(dict(islice(viewitems(s), max_len))) else: truncated_str = str(list(islice(s, max_len))) return truncated_str[:-1] + if len(s) > max_len else truncated_str
r"""Return a string of at most `max_len` characters or sequence elements, appended with the `ellipsis` characters >>> truncate(OrderedDict(zip(list('ABCDEFGH'), range(8))), 1) "{'A': 0..." >>> truncate(list(range(5)), 3) '[0, 1, 2...' >>> truncate(np.arange(5), 3) '[0, 1, 2...' >>> truncate('Too verbose for its own good.', 11) 'Too verbose...'
7,985
def dependent_hosted_number_orders(self): if self._dependent_hosted_number_orders is None: self._dependent_hosted_number_orders = DependentHostedNumberOrderList( self._version, signing_document_sid=self._solution[], ) return self._dependent_hosted_number_orders
Access the dependent_hosted_number_orders :returns: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList :rtype: twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderList
7,986
def check_if_ok_to_update(self): current_time = int(time.time()) last_refresh = self.last_refresh if last_refresh is None: last_refresh = 0 if current_time >= (last_refresh + self.refresh_rate): return True return False
Check if it is ok to perform an http request.
7,987
def multiplication_circuit(nbit, vartype=dimod.BINARY): if nbit < 1: raise ValueError("num_multiplier_bits, num_multiplicand_bits must be positive integers") num_multiplier_bits = num_multiplicand_bits = nbit csp = ConstraintSatisfactionProblem(vartype) a = {i: % i for i in range(nbit)} b = {j: % j for j in range(nbit)} p = {k: % k for k in range(nbit + nbit)} AND = defaultdict(dict) SUM = defaultdict(dict) CARRY = defaultdict(dict) for i in range(num_multiplier_bits): for j in range(num_multiplicand_bits): ai = a[i] bj = b[j] if i == 0 and j == 0: gate = fulladder_gate([inputs[0], inputs[1], inputs[2], sumout, carryout], vartype=vartype, name=name) csp.add_constraint(gate) return csp
Multiplication circuit constraint satisfaction problem. A constraint satisfaction problem that represents the binary multiplication :math:`ab=p`, where the multiplicands are binary variables of length `nbit`; for example, :math:`a_0 + 2a_1 + 4a_2 +... +2^ma_{nbit}`. The square below shows a graphic representation of the circuit:: ________________________________________________________________________________ | and20 and10 and00 | | | | | | | and21 add11──and11 add01──and01 | | | |β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜|β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜| | | | and22 add12──and12 add02──and02 | | | | |β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜|β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜| | | | | add13─────────add03 | | | | | β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜| | | | | | | p5 p4 p3 p2 p1 p0 | -------------------------------------------------------------------------------- Args: nbit (int): Number of bits in the multiplicands. vartype (Vartype, optional, default='BINARY'): Variable type. Accepted input values: * Vartype.SPIN, 'SPIN', {-1, 1} * Vartype.BINARY, 'BINARY', {0, 1} Returns: CSP (:obj:`.ConstraintSatisfactionProblem`): CSP that is satisfied when variables :math:`a,b,p` are assigned values that correctly solve binary multiplication :math:`ab=p`. Examples: This example creates a multiplication circuit CSP that multiplies two 3-bit numbers, which is then formulated as a binary quadratic model (BQM). It fixes the multiplacands as :math:`a=5, b=6` (:math:`101` and :math:`110`) and uses a simulated annealing sampler to find the product, :math:`p=30` (:math:`111100`). >>> import dwavebinarycsp >>> from dwavebinarycsp.factories.csp.circuits import multiplication_circuit >>> import neal >>> csp = multiplication_circuit(3) >>> bqm = dwavebinarycsp.stitch(csp) >>> bqm.fix_variable('a0', 1); bqm.fix_variable('a1', 0); bqm.fix_variable('a2', 1) >>> bqm.fix_variable('b0', 1); bqm.fix_variable('b1', 1); bqm.fix_variable('b2', 0) >>> sampler = neal.SimulatedAnnealingSampler() >>> response = sampler.sample(bqm) >>> p = next(response.samples(n=1, sorted_by='energy')) >>> print(p['p0'], p['p1'], p['p2'], p['p3'], p['p4'], p['p5']) # doctest: +SKIP 1 1 1 1 0 0
7,988
def _construct_deutsch_jozsa_circuit(self): dj_prog = Program() dj_prog.inst(X(self.ancillas[0]), H(self.ancillas[0])) dj_prog.inst([H(qubit) for qubit in self.computational_qubits]) oracle_prog = Program() oracle_prog.defgate(ORACLE_GATE_NAME, self.unitary_matrix) scratch_bit = self.ancillas[1] qubits_for_funct = [scratch_bit] + self.computational_qubits oracle_prog.inst(tuple([ORACLE_GATE_NAME] + qubits_for_funct)) dj_prog += oracle_prog dj_prog.inst(CNOT(self._qubits[0], self.ancillas[0])) dj_prog += oracle_prog.dagger() dj_prog.inst([H(qubit) for qubit in self.computational_qubits]) return dj_prog
Builds the Deutsch-Jozsa circuit, which can determine whether a function f mapping :math:`\{0,1\}^n \to \{0,1\}` is constant or balanced, provided that it is one of them. :return: A program corresponding to the desired instance of Deutsch-Jozsa's Algorithm. :rtype: Program
7,989
def get_ambient_sensor_data(self): resource = .format(self.device_id) history_event = self.publish_and_get_event(resource) if history_event is None: return None properties = history_event.get() self._ambient_sensor_data = \ ArloBaseStation._decode_sensor_data(properties) return self._ambient_sensor_data
Refresh ambient sensor history
7,990
def clean(self): super(EnterpriseCustomerIdentityProviderAdminForm, self).clean() provider_id = self.cleaned_data.get(, None) enterprise_customer = self.cleaned_data.get(, None) if provider_id is None or enterprise_customer is None: return identity_provider = utils.get_identity_provider(provider_id) if not identity_provider: message = _( "The specified Identity Provider does not exist. For more " "information, contact a system administrator.", ) logger.exception(message) raise ValidationError(message) if identity_provider and identity_provider.site != enterprise_customer.site: raise ValidationError( _( "The site for the selected identity provider " "({identity_provider_site}) does not match the site for " "this enterprise customer ({enterprise_customer_site}). " "To correct this problem, select a site that has a domain " "of , or update the identity " "provider to ." ).format( enterprise_customer_site=enterprise_customer.site, identity_provider_site=identity_provider.site, ), )
Final validations of model fields. 1. Validate that the selected site for the enterprise customer matches the selected identity provider's site.
7,991
def process_directory_statements_sorted_by_pmid(directory_name): s_dict = defaultdict(list) mp = process_directory(directory_name, lazy=True) for statement in mp.iter_statements(): s_dict[statement.evidence[0].pmid].append(statement) return s_dict
Processes a directory filled with CSXML files, first normalizing the character encoding to utf-8, and then processing into INDRA statements sorted by pmid. Parameters ---------- directory_name : str The name of a directory filled with csxml files to process Returns ------- pmid_dict : dict A dictionary mapping pmids to a list of statements corresponding to that pmid
7,992
def get_lowest_numeric_score_metadata(self): metadata = dict(self._mdata[]) metadata.update({: self._my_map[]}) return Metadata(**metadata)
Gets the metadata for the lowest numeric score. return: (osid.Metadata) - metadata for the lowest numeric score *compliance: mandatory -- This method must be implemented.*
7,993
def unpickle(self, parent): self.parent = parent self._unpickle_collection(self.members) self._unpickle_collection(self.dependencies) self._unpickle_collection(self.types) self._unpickle_collection(self.executables) self._unpickle_collection(self._parameters) self.unpickle_docs()
Sets the parent pointer references for the module *and* all of its child classes that also have pointer references.
7,994
def parse_log_entry(text): text = text.strip() if well_formed_log_entry_p(text): return LogEntry(text) else: def use_value(obj): return obj def reparse(text): return parse_log_entry(text) with restarts(use_value, reparse) as call: return call(signal, MalformedLogEntryError(text))
This function does the real job of log-line parsing. It sets up two restart cases for when a line with the wrong format is found. Restarts: - use_value: just returns the object it was passed. This can be any value. - reparse: calls `parse_log_entry` again with another text value. Beware, this call can lead to infinite recursion.
7,995
def _to_reddit_list(arg): if (isinstance(arg, six.string_types) or not ( hasattr(arg, "__getitem__") or hasattr(arg, "__iter__"))): return six.text_type(arg) else: return .join(six.text_type(a) for a in arg)
Return an argument converted to a reddit-formatted list. The returned format is a comma-delimited list. Each element is a string representation of an object, either given as a string or as an object that is then converted to its string representation.
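The join separator literal was stripped from the cell above; per the docstring it is a comma, so the behaviour would be roughly:

# Assuming the stripped separator literal was ',':
print(_to_reddit_list('spam'))          # 'spam'
print(_to_reddit_list(['spam', 42]))    # 'spam,42'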
7,996
def flg(self, name, help, abbrev=None): abbrev = abbrev or + name[0] longname = + name.replace(, ) self._add(name, abbrev, longname, action=, help=help)
Describe a flag
7,997
def _client_properties(): return { : , : % (platform.python_version(), platform.python_implementation()), : { : True, : True, : True, : True, : True, }, : , : __version__ }
AMQPStorm Client Properties. :rtype: dict
7,998
def update(self): stats = self.get_init_value() if import_error_tag: return self.stats if self.input_method == : try: mds = MdStat() stats = mds.get_stats()[] except Exception as e: logger.debug("Can not grab RAID stats (%s)" % e) return self.stats elif self.input_method == : pass self.stats = stats return self.stats
Update RAID stats using the input method.
7,999
def get_songs()->Iterator: with session_withcommit() as session: val = session.query(songs).all() for row in val: yield row
Return songs that have the fingerprinted flag set TRUE (1).