code (string, lengths 51 to 2.38k) | docstring (string, lengths 4 to 15.2k)
---|---|
def _load_matcher(self) -> None:
for id_key in self._rule_lst:
if self._rule_lst[id_key].active:
pattern_lst = [a_pattern.spacy_token_lst for a_pattern in self._rule_lst[id_key].patterns]
for spacy_rule_id, spacy_rule in enumerate(itertools.product(*pattern_lst)):
self._matcher.add(self._construct_key(id_key, spacy_rule_id), None, list(spacy_rule))
|
Add constructed spacy rule to Matcher
|
def reject_recursive_repeats(to_wrap):
to_wrap.__already_called = {}
@functools.wraps(to_wrap)
def wrapped(*args):
arg_instances = tuple(map(id, args))
thread_id = threading.get_ident()
thread_local_args = (thread_id,) + arg_instances
if thread_local_args in to_wrap.__already_called:
raise ValueError('Recursively called %s with %r' % (to_wrap, args))
to_wrap.__already_called[thread_local_args] = True
try:
wrapped_val = to_wrap(*args)
finally:
del to_wrap.__already_called[thread_local_args]
return wrapped_val
return wrapped
|
Prevent simple cycles by raising ValueError when called recursively with the same arguments
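A minimal usage sketch (illustrative only; the decorated function name is made up) showing how a recursive re-entry is rejected:
    @reject_recursive_repeats
    def resolve(obj):
        # Re-entering resolve() with the same argument while the first call
        # is still on the stack raises ValueError instead of recursing forever.
        return resolve(obj)

    try:
        resolve(object())
    except ValueError as exc:
        print(exc)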
|
def __is_subgraph_planar(graph):
num_nodes = graph.num_nodes()
num_edges = graph.num_edges()
if num_nodes < 5:
return True
if num_edges > 3*(num_nodes - 2):
return False
return kocay_planarity_test(graph)
|
Internal function to determine if a subgraph is planar. Any graph with fewer than
five nodes is planar, and a simple planar graph on n nodes has at most 3n - 6 edges,
so both checks serve as quick shortcuts before running the full Kocay planarity test.
|
def merge_element_data(dest, sources, use_copy=True):
if dest is not None:
ret = dest.copy()
else:
ret = {}
if use_copy:
sources = copy.deepcopy(sources)
for s in sources:
if 'electron_shells' in s:
if 'electron_shells' not in ret:
ret['electron_shells'] = []
ret['electron_shells'].extend(s['electron_shells'])
if 'ecp_potentials' in s:
if 'ecp_potentials' in ret:
raise RuntimeError('Cannot overwrite existing ECP')
ret['ecp_potentials'] = s['ecp_potentials']
ret['ecp_electrons'] = s['ecp_electrons']
if 'references' in s:
if 'references' not in ret:
ret['references'] = []
for ref in s['references']:
if not ref in ret['references']:
ret['references'].append(ref)
return ret
|
Merges the basis set data for an element from multiple sources
into dest.
The destination is not modified, and a (shallow) copy of dest is returned
with the data from sources added.
If use_copy is True, then the data merged into dest will be a (deep)
copy of that found in sources. Otherwise, data may be shared between dest
and sources
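A small illustrative sketch with made-up element data (the shell entries are placeholders, not real basis set records), assuming the function above is in scope:
    dest = {'references': ['refA']}
    sources = [{'electron_shells': ['shell_1'], 'references': ['refA', 'refB']}]
    merged = merge_element_data(dest, sources)
    print(merged['electron_shells'])   # ['shell_1']
    print(merged['references'])        # ['refA', 'refB']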
|
def _intersection(A,B):
intersection = []
for i in A:
if i in B:
intersection.append(i)
return intersection
|
A simple function to find an intersection between two arrays.
@type A: List
@param A: First List
@type B: List
@param B: Second List
@rtype: List
@return: List of Intersections
|
def format_info(raw):
logging.debug(_('raw[0]: %s'), raw[0])
results, sense = raw
new = '\n'.join(
'{} {} {} {}'.format(
i[0], sense.kind_id_to_name(i[1]),
sense.file_id_to_name(i[2]).lower(),
i[3] + ' ' if i[3] else '').strip()
for i in results)
return new
|
Format a string representing the information
concerning the name.
|
def external(func):
def f(*args, **kwargs):
return func(*args, **kwargs)
f.external = True
f.__doc__ = func.__doc__
return f
|
Mark function as external.
:param func:
:return:
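For illustration, a hypothetical function marked with the decorator above:
    @external
    def ping():
        """Return a liveness marker."""
        return 'pong'

    print(ping())         # pong
    print(ping.external)  # True
    print(ping.__doc__)   # Return a liveness marker.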
|
def _iupac_ambiguous_equal(ambig_base, unambig_base):
iupac_translation = {
'A': 'A',
'C': 'C',
'G': 'G',
'T': 'T',
'U': 'U',
'R': 'AG',
'Y': 'CT',
'S': 'GC',
'W': 'AT',
'K': 'GT',
'M': 'AC',
'B': 'CGT',
'D': 'AGT',
'H': 'ACT',
'V': 'ACG',
'N': 'ACGT',
'-': '-'
}
for i in (ambig_base, unambig_base):
if not len(i) == 1:
raise ValueError("only one base may be passed.")
return unambig_base.upper() in iupac_translation[ambig_base.upper()]
|
Tests two bases for equality, accounting for IUPAC ambiguous DNA codes.
The ambiguous base may be any IUPAC code; the unambiguous base must be one of ACGT.
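Illustrative calls, assuming the function above is in scope (values follow the IUPAC table: R = A/G, Y = C/T):
    assert _iupac_ambiguous_equal('R', 'A')       # R covers A and G
    assert _iupac_ambiguous_equal('R', 'G')
    assert not _iupac_ambiguous_equal('Y', 'A')   # Y covers C and T only
    assert _iupac_ambiguous_equal('n', 't')       # comparison is case-insensitive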
|
def _dict_key_priority(s):
if isinstance(s, Hook):
return _priority(s._schema) - 0.5
if isinstance(s, Optional):
return _priority(s._schema) + 0.5
return _priority(s)
|
Return priority for a given key object.
|
def publish(self, topic, data, defer=None, block=True, timeout=None,
raise_error=True):
result = AsyncResult()
conn = self._get_connection(block=block, timeout=timeout)
try:
self._response_queues[conn].append(result)
conn.publish(topic, data, defer=defer)
finally:
self._put_connection(conn)
if raise_error:
return result.get()
return result
|
Publish a message to the given topic.
:param topic: the topic to publish to
:param data: bytestring data to publish
:param defer: duration in milliseconds to defer before publishing
(requires nsq 0.3.6)
:param block: wait for a connection to become available before
publishing the message. If block is `False` and no connections
are available, :class:`~gnsq.errors.NSQNoConnections` is raised
:param timeout: if timeout is a positive number, it blocks at most
``timeout`` seconds before raising
:class:`~gnsq.errors.NSQNoConnections`
:param raise_error: if ``True``, it blocks until a response is received
from the nsqd server, and any error response is raised. Otherwise
an :class:`~gevent.event.AsyncResult` is returned
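A hypothetical usage sketch; `producer` stands in for whatever connected gnsq producer object exposes this method:
    producer.publish('events', b'hello world')             # blocks until nsqd responds
    result = producer.publish('events', b'later', defer=5000,
                              raise_error=False)            # returns an AsyncResult
    print(result.get())                                     # wait for the response later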
|
def dict_from_node(node, recursive=False):
dict = {}
for snode in node:
if len(snode) > 0:
if recursive:
value = dict_from_node(snode, True)
else:
value = len(snode)
elif snode.text is not None:
value = snode.text
else:
value = u''
if snode.tag in dict.keys():
if type(dict[snode.tag]) is list:
dict[snode.tag].append(value)
else:
dict[snode.tag] = [ dict[snode.tag], value ]
else:
dict[snode.tag] = value
return dict
|
Converts ElementTree node to a dictionary.
Parameters
----------
node : ElementTree node
recursive : boolean
If recursive=False, the value of any field with children will be the
number of children.
Returns
-------
dict : nested dictionary.
Tags as keys and values as values. Sub-elements that occur multiple
times in an element are contained in a list.
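An illustrative conversion with a made-up XML snippet, assuming dict_from_node is in scope:
    import xml.etree.ElementTree as ET

    node = ET.fromstring('<person><name>Ada</name><tag>a</tag><tag>b</tag></person>')
    print(dict_from_node(node))
    # {'name': 'Ada', 'tag': ['a', 'b']}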
|
def confirmation_pdf(self, confirmation_id):
return self._create_get_request(resource=CONFIRMATIONS, billomat_id=confirmation_id, command=PDF)
|
Opens a pdf of a confirmation
:param confirmation_id: the confirmation id
:return: dict
|
def last_executed_query(self, cursor, sql, params):
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
|
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
|
def engine(self):
pid = os.getpid()
conn = SQLAlchemyTarget._engine_dict.get(self.connection_string)
if not conn or conn.pid != pid:
engine = sqlalchemy.create_engine(
self.connection_string,
connect_args=self.connect_args,
echo=self.echo
)
SQLAlchemyTarget._engine_dict[self.connection_string] = self.Connection(engine, pid)
return SQLAlchemyTarget._engine_dict[self.connection_string].engine
|
Return an engine instance, creating it if it doesn't exist.
Recreate the engine connection if it wasn't originally created
by the current process.
|
def write_sample_sheet(path, accessions, names, celfile_urls, sel=None):
with open(path, 'wb') as ofh:
writer = csv.writer(ofh, dialect='excel-tab',
lineterminator=os.linesep,
quoting=csv.QUOTE_NONE)
writer.writerow(['Accession', 'Name', 'CEL file', 'CEL file URL'])
n = len(names)
if sel is None:
sel = range(n)
for i in sel:
cf = celfile_urls[i].split('/')[-1]
writer.writerow([accessions[i], names[i], cf, celfile_urls[i]])
|
Write the sample sheet.
|
def catalog_split_yaml(self, **kwargs):
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.catalog_split_yaml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath
|
return the name of a catalog split yaml file
|
def train(self, content_objs, idx_labels):
if len(set([lab[0] for lab in idx_labels])) <= 1:
return None
fcs = [fc for _, fc in content_objs]
feature_names = vectorizable_features(fcs)
dis = dissimilarities(feature_names, fcs)
phi_dicts, labels = [], []
for coref_value, i, j in idx_labels:
labels.append(coref_value)
phi_dict = dict([(name, dis[name][i,j]) for name in feature_names])
phi_dicts.append(phi_dict)
vec = dict_vector()
training_data = vec.fit_transform(phi_dicts)
model = LogisticRegression(class_weight='auto', penalty='l1')
model.fit(training_data, labels)
self.feature_weights = dict([(name, model.coef_[0][i])
for i, name in enumerate(feature_names)])
return feature_names, model, vec
|
Trains and returns a model using sklearn.
If there are new labels to add, they can be added; returns an
sklearn model which can be used for prediction and getting
features.
This method may return ``None`` if there is insufficient
training data to produce a model.
:param idx_labels: Ground truth data.
:type idx_labels: list of ``({-1, 1}, index1, index2)``.
|
def draw_address(canvas):
business_details = (
u'COMPANY NAME LTD',
u'STREET',
u'TOWN',
U'COUNTY',
U'POSTCODE',
U'COUNTRY',
u'',
u'',
u'Phone: +00 (0) 000 000 000',
u'Email: [email protected]',
u'Website: www.example.com',
u'Reg No: 00000000'
)
canvas.setFont('Helvetica', 9)
textobject = canvas.beginText(13 * cm, -2.5 * cm)
for line in business_details:
textobject.textLine(line)
canvas.drawText(textobject)
|
Draws the business address
|
def on_output_path_textChanged(self):
output_path = self.output_path.text()
output_not_xml_msg = tr('output file is not .tif')
if output_path and not output_path.endswith('.tif'):
self.warning_text.add(output_not_xml_msg)
elif output_path and output_not_xml_msg in self.warning_text:
self.warning_text.remove(output_not_xml_msg)
self.update_warning()
|
Action when output file name is changed.
|
def _log_function(self, handler):
if handler.get_status() < 400:
log_method = request_log.info
elif handler.get_status() < 500:
log_method = request_log.warning
else:
log_method = request_log.error
for i in settings['LOGGING_IGNORE_URLS']:
if handler.request.uri.startswith(i):
log_method = request_log.debug
break
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
|
Override Application.log_function so that what to log can be controlled.
|
def loadSettings(self, groupName=None):
groupName = groupName if groupName else self.settingsGroupName
settings = QtCore.QSettings()
logger.info("Reading {!r} from: {}".format(groupName, settings.fileName()))
settings.beginGroup(groupName)
self.clear()
try:
for key in settings.childKeys():
if key.startswith('item'):
dct = ast.literal_eval(settings.value(key))
regItem = self._itemClass.createFromDict(dct)
self.registerItem(regItem)
finally:
settings.endGroup()
|
Reads the registry items from the persistent settings store.
|
def _signal_handler(self, signum, frame):
if self._options.config:
with open(self._options.config, "w") as cfg:
yaml.dump(self._home_assistant_config(), cfg)
print(
"Dumped home assistant configuration at",
self._options.config)
self._connection.close()
sys.exit(0)
|
Method called when handling signals
|
def permitted_actions(self, user, obj=None):
try:
if not self._obj_ok(obj):
raise InvalidPermissionObjectException
return user.permset_tree.permitted_actions(obj)
except ObjectDoesNotExist:
return []
|
Determine list of permitted actions for an object or object
pattern.
:param user: The user to test.
:type user: ``User``
:param obj: A function mapping from action names to object
paths to test.
:type obj: callable
:returns: ``list(tutelary.engine.Action)`` -- permitted actions.
|
def _get_variable_names(arr):
if VARIABLELABEL in arr.dims:
return arr.coords[VARIABLELABEL].tolist()
else:
return arr.name
|
Return the variable names of an array
|
def info(gandi, resource):
output_keys = ['name', 'state', 'size', 'type', 'id', 'dc', 'vm',
'profile', 'kernel', 'cmdline']
resource = sorted(tuple(set(resource)))
vms = dict([(vm['id'], vm) for vm in gandi.iaas.list()])
datacenters = gandi.datacenter.list()
result = []
for num, item in enumerate(resource):
if num:
gandi.separator_line()
disk = gandi.disk.info(item)
output_disk(gandi, disk, datacenters, vms, [], output_keys)
result.append(disk)
return result
|
Display information about a disk.
Resource can be a disk name or ID
|
def readFile(self, pathToFile):
fd = open(pathToFile, "rb")
data = fd.read()
fd.close()
return data
|
Returns data from a file.
@type pathToFile: str
@param pathToFile: Path to the file.
@rtype: str
@return: The data from file.
|
def glyphs2ufo(options):
if options.output_dir is None:
options.output_dir = os.path.dirname(options.glyphs_file) or "."
if options.designspace_path is None:
options.designspace_path = os.path.join(
options.output_dir,
os.path.basename(os.path.splitext(options.glyphs_file)[0]) + ".designspace",
)
glyphsLib.build_masters(
options.glyphs_file,
options.output_dir,
options.instance_dir,
designspace_path=options.designspace_path,
minimize_glyphs_diffs=options.no_preserve_glyphsapp_metadata,
propagate_anchors=options.propagate_anchors,
normalize_ufos=options.normalize_ufos,
create_background_layers=options.create_background_layers,
generate_GDEF=options.generate_GDEF,
store_editor_state=not options.no_store_editor_state,
)
|
Converts a Glyphs.app source file into UFO masters and a designspace file.
|
def iter_tours(tourfile, frames=1):
fp = open(tourfile)
i = 0
for row in fp:
if row[0] == '>':
label = row[1:].strip()
if label.startswith("GA"):
pf, j, score = label.split("-", 2)
j = int(j)
else:
j = 0
i += 1
else:
if j % frames != 0:
continue
tour, tour_o = separate_tour_and_o(row)
yield i, label, tour, tour_o
fp.close()
|
Extract tours from tourfile. Tourfile contains a set of contig
configurations, generated at each iteration of the genetic algorithm. Each
configuration has two rows, first row contains iteration id and score,
second row contains list of contigs, separated by comma.
|
def diff(self, mail_a, mail_b):
return len(''.join(unified_diff(
mail_a.body_lines, mail_b.body_lines,
fromfile='a', tofile='b',
fromfiledate='', tofiledate='',
n=0, lineterm='\n')))
|
Return difference in bytes between two mails' normalized body.
TODO: rewrite the diff algorithm to not rely on naive unified diff
result parsing.
|
def commit(self, id, impreq):
schema = RequestSchema()
json = self.service.encode(schema, impreq)
schema = RequestSchema()
resp = self.service.post(self.base+str(id)+'/', json=json)
return self.service.decode(schema, resp)
|
Commit a staged import.
:param id: Staged import ID as an int.
:param impreq: :class:`imports.Request <imports.Request>` object
:return: :class:`imports.Request <imports.Request>` object
:rtype: imports.Request
|
def deploy_from_template(self, context, deploy_action, cancellation_context):
deploy_from_template_model = self.resource_model_parser.convert_to_resource_model(
attributes=deploy_action.actionParams.deployment.attributes,
resource_model_type=vCenterVMFromTemplateResourceModel)
data_holder = DeployFromTemplateDetails(deploy_from_template_model, deploy_action.actionParams.appName)
deploy_result_action = self.command_wrapper.execute_command_with_connection(
context,
self.deploy_command.execute_deploy_from_template,
data_holder,
cancellation_context,
self.folder_manager)
deploy_result_action.actionId = deploy_action.actionId
return deploy_result_action
|
Deploy From Template Command, will deploy vm from template
:param CancellationContext cancellation_context:
:param ResourceCommandContext context: the context of the command
:param DeployApp deploy_action:
:return DeployAppResult deploy results
|
def authenticate_with_serviceaccount(reactor, **kw):
config = KubeConfig.from_service_account(**kw)
policy = https_policy_from_config(config)
token = config.user["token"]
agent = HeaderInjectingAgent(
_to_inject=Headers({u"authorization": [u"Bearer {}".format(token)]}),
_agent=Agent(reactor, contextFactory=policy),
)
return agent
|
Create an ``IAgent`` which can issue authenticated requests to a
particular Kubernetes server using a service account token.
:param reactor: The reactor with which to configure the resulting agent.
:param bytes path: The location of the service account directory. The
default should work fine for normal use within a container.
:return IAgent: An agent which will authenticate itself to a particular
Kubernetes server and which will verify that server or refuse to
interact with it.
|
def get_region(self, x,z):
if (x,z) not in self.regions:
if (x,z) in self.regionfiles:
self.regions[(x,z)] = region.RegionFile(self.regionfiles[(x,z)])
else:
self.regions[(x,z)] = region.RegionFile()
self.regions[(x,z)].loc = Location(x=x,z=z)
return self.regions[(x,z)]
|
Get a region using x,z coordinates of a region. Cache results.
|
def fields(self):
if self._fields is None:
self._fields = FieldList(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['sid'],
)
return self._fields
|
Access the fields
:returns: twilio.rest.autopilot.v1.assistant.task.field.FieldList
:rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldList
|
def get_allowed_domain(url, allow_subdomains=True):
if allow_subdomains:
return re.sub(re_www, '', re.search(r'[^/]+\.[^/]+', url).group(0))
else:
return re.search(re_domain, UrlExtractor.get_allowed_domain(url)).group(0)
|
Determines the url's domain.
:param str url: the url to extract the allowed domain from
:param bool allow_subdomains: determines whether to include subdomains
:return str: subdomains.domain.topleveldomain or domain.topleveldomain
|
def unpack_struct(self, struct):
size = struct.size
offset = self.offset
if self.data:
avail = len(self.data) - offset
else:
avail = 0
if avail < size:
raise UnpackException(struct.format, size, avail)
self.offset = offset + size
return struct.unpack_from(self.data, offset)
|
unpacks the given struct from the underlying buffer and returns
the results. Will raise an UnpackException if there is not
enough data to satisfy the format of the structure
|
def _init_file(self):
self.keyring_key = self._get_new_password()
self.set_password('keyring-setting',
'password reference',
'password reference value')
self._write_config_value('keyring-setting',
'scheme',
self.scheme)
self._write_config_value('keyring-setting',
'version',
self.version)
|
Initialize a new password file and set the reference password.
|
def _cfg(key, default=None):
root_cfg = __salt__.get('config.get', __opts__.get)
kms_cfg = root_cfg('aws_kms', {})
return kms_cfg.get(key, default)
|
Return the requested value from the aws_kms key in salt configuration.
If it's not set, return the default.
|
def setScales(self,scales=None,term_num=None):
if scales==None:
for term_i in range(self.n_terms):
n_scales = self.vd.getTerm(term_i).getNumberScales()
self.vd.getTerm(term_i).setScales(SP.array(SP.randn(n_scales)))
elif term_num==None:
assert scales.shape[0]==self.vd.getNumberScales(), 'incompatible shape'
index = 0
for term_i in range(self.n_terms):
index1 = index+self.vd.getTerm(term_i).getNumberScales()
self.vd.getTerm(term_i).setScales(scales[index:index1])
index = index1
else:
assert scales.shape[0]==self.vd.getTerm(term_num).getNumberScales(), 'incompatible shape'
self.vd.getTerm(term_num).setScales(scales)
|
get random initialization of variances based on the empirical trait variance
Args:
scales: if scales==None: set them randomly,
else: set scales to term_num (if term_num==None: set to all terms)
term_num: set scales to term_num
|
def _get_hanging_wall_coeffs_rrup(self, dists):
fhngrrup = np.ones(len(dists.rrup))
idx = dists.rrup > 0.0
fhngrrup[idx] = (dists.rrup[idx] - dists.rjb[idx]) / dists.rrup[idx]
return fhngrrup
|
Returns the hanging wall rrup term defined in equation 13
|
def get_phase(n_samples, des_mask, asc_mask):
import numpy
phase = numpy.zeros(n_samples, dtype=int)
phase[asc_mask] = 1
phase[des_mask] = -1
return phase
|
Get the directional phase sign for each sample in depths
Args
----
n_samples: int
Length of output phase array
des_mask: numpy.ndarray, shape (n,)
Boolean mask of values where animal is descending
asc_mask: numpy.ndarray, shape(n,)
Boolean mask of values where animal is ascending
Returns
-------
phase: numpy.ndarray, shape (n,)
Signed integer array values representing animal's dive phase
*Phases*:
* 0: neither ascending/descending
* 1: ascending
* -1: descending.
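A quick illustrative call with toy masks, assuming the function above is importable:
    import numpy

    des_mask = numpy.array([False, True, True, False, False])
    asc_mask = numpy.array([False, False, False, True, True])
    print(get_phase(5, des_mask, asc_mask))   # [ 0 -1 -1  1  1]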
|
def _get_api_id(self, event_properties):
api_id = event_properties.get("RestApiId")
if isinstance(api_id, dict) and "Ref" in api_id:
api_id = api_id["Ref"]
return api_id
|
Get API logical id from API event properties.
Handles case where API id is not specified or is a reference to a logical id.
|
def setup_dir(self):
cd = self.opts.cd or self.config['crony'].get('directory')
if cd:
self.logger.debug(f'Adding cd to {cd}')
self.cmd = f'cd {cd} && {self.cmd}'
|
Change directory for script if necessary.
|
def add(self, key):
encodedKey = json.dumps(key)
with self.connect() as conn:
with doTransaction(conn):
sql = 'INSERT IGNORE INTO ' + self.table + ' (name) VALUES (%s)'
return insertSQL(conn, sql, args=[encodedKey])
|
add key to the namespace. it is fine to add a key multiple times.
|
def __start_connection(self, context, node, ccallbacks=None):
_logger.debug("Creating connection object: CONTEXT=[%s] NODE=[%s]",
context, node)
c = nsq.connection.Connection(
context,
node,
self.__identify,
self.__message_handler,
self.__quit_ev,
ccallbacks,
ignore_quit=self.__connection_ignore_quit)
g = gevent.spawn(c.run)
timeout_s = nsq.config.client.NEW_CONNECTION_NEGOTIATE_TIMEOUT_S
if c.connected_ev.wait(timeout_s) is False:
_logger.error("New connection to server [%s] timed-out. Cleaning-"
"up thread.", node)
g.kill()
g.join()
raise EnvironmentError("Connection to server [%s] failed." %
(node,))
self.__connections.append((node, c, g))
|
Start a new connection, and manage it from a new greenlet.
|
def getYadisXRD(xrd_tree):
xrd = None
for xrd in xrd_tree.findall(xrd_tag):
pass
if xrd is None:
raise XRDSError('No XRD present in tree')
return xrd
|
Return the XRD element that should contain the Yadis services
|
def _exponential_timeout_generator(initial, maximum, multiplier, deadline):
if deadline is not None:
deadline_datetime = datetime_helpers.utcnow() + datetime.timedelta(
seconds=deadline
)
else:
deadline_datetime = datetime.datetime.max
timeout = initial
while True:
now = datetime_helpers.utcnow()
yield min(
timeout,
maximum,
float((deadline_datetime - now).seconds),
)
timeout = timeout * multiplier
|
A generator that yields exponential timeout values.
Args:
initial (float): The initial timeout.
maximum (float): The maximum timeout.
multiplier (float): The multiplier applied to the timeout.
deadline (float): The overall deadline across all invocations.
Yields:
float: A timeout value.
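A sketch of consuming the generator (argument values are made up); timeouts grow geometrically until capped by ``maximum`` or the remaining deadline:
    gen = _exponential_timeout_generator(initial=1.0, maximum=10.0,
                                         multiplier=2.0, deadline=60.0)
    print([next(gen) for _ in range(5)])   # roughly [1.0, 2.0, 4.0, 8.0, 10.0]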
|
def raise_error(error_type: str) -> None:
try:
error = next((v for k, v in ERROR_CODES.items() if k in error_type))
except StopIteration:
error = AirVisualError
raise error(error_type)
|
Raise the appropriate error based on error message.
|
def _button_plus_clicked(self, n):
self._button_save.setEnabled(True)
self.insert_colorpoint(self._colorpoint_list[n][0],
self._colorpoint_list[n][1],
self._colorpoint_list[n][2])
self._build_gui()
|
Create a new colorpoint.
|
def section(self, section):
if not isinstance(self._container, ConfigUpdater):
raise ValueError("Sections can only be added at section level!")
if isinstance(section, str):
section = Section(section, container=self._container)
elif not isinstance(section, Section):
raise ValueError("Parameter must be a string or Section type!")
if section.name in [block.name for block in self._container
if isinstance(block, Section)]:
raise DuplicateSectionError(section.name)
self._container.structure.insert(self._idx, section)
self._idx += 1
return self
|
Creates a section block
Args:
section (str or :class:`Section`): name of section or object
Returns:
self for chaining
|
def normalized_start(self):
namespaces_after_key = list(self.make_datastore_query().Run(limit=1))
if not namespaces_after_key:
return None
namespace_after_key = namespaces_after_key[0].name() or ''
return NamespaceRange(namespace_after_key,
self.namespace_end,
_app=self.app)
|
Returns a NamespaceRange with leading non-existent namespaces removed.
Returns:
A copy of this NamespaceRange whose namespace_start is adjusted to exclude
the portion of the range that contains no actual namespaces in the
datastore. None is returned if the NamespaceRange contains no actual
namespaces in the datastore.
|
def __locate_scubainit(self):
pkg_path = os.path.dirname(__file__)
self.scubainit_path = os.path.join(pkg_path, 'scubainit')
if not os.path.isfile(self.scubainit_path):
raise ScubaError('scubainit not found at "{}"'.format(self.scubainit_path))
|
Determine path to scubainit binary
|
def open_http(self, url, data=None):
return self._open_generic_http(http_client.HTTPConnection, url, data)
|
Use HTTP protocol.
|
def find_packages():
packages = ['pyctools']
for root, dirs, files in os.walk(os.path.join('src', 'pyctools')):
package = '.'.join(root.split(os.sep)[1:])
for name in dirs:
packages.append(package + '.' + name)
return packages
|
Walk source directory tree and convert each sub directory to a
package name.
|
def capacityForRole(self,role):
if isinstance(role, DanceRole):
role_id = role.id
else:
role_id = role
eventRoles = self.eventrole_set.filter(capacity__gt=0)
if eventRoles.count() > 0 and role_id not in [x.role.id for x in eventRoles]:
return 0
elif eventRoles.count() > 0:
return eventRoles.get(role=role).capacity
if isinstance(self,Series):
try:
availableRoles = self.classDescription.danceTypeLevel.danceType.roles.all()
if availableRoles.count() > 0 and role_id not in [x.id for x in availableRoles]:
return 0
elif availableRoles.count() > 0 and self.capacity:
return ceil(self.capacity / availableRoles.count())
except ObjectDoesNotExist as e:
logger.error('Error in calculating capacity for role: %s' % e)
return self.capacity
|
Accepts a DanceRole object and determines the capacity for that role at this event.
Since roles are not always custom specified for events, this looks for the set of
available roles in multiple places, and only returns the overall capacity of the event
if roles are not found elsewhere.
|
def create_and_run_collector(document, options):
collector = None
if not options.report == 'off':
collector = Collector()
collector.store.configure(document)
Event.configure(collector_queue=collector.queue)
collector.start()
return collector
|
Create and run collector process for report data.
|
def time_until_expiration(self):
if self.password_expires_at is not None:
expiration_date = datetime.datetime.strptime(
self.password_expires_at, "%Y-%m-%dT%H:%M:%S.%f")
return expiration_date - datetime.datetime.now()
|
Returns the number of remaining days until user's password expires.
Calculates the number of days until the user must change their password;
once the password expires, the user will not be able to log in until an
admin changes it.
|
def validate_port(port):
if not isinstance(port, (str, int)):
raise TypeError(f'port must be an integer or string: {port}')
if isinstance(port, str) and port.isdigit():
port = int(port)
if isinstance(port, int) and 0 < port <= 65535:
return port
raise ValueError(f'invalid port: {port}')
|
Validate port and return it as an integer.
A string, or its representation as an integer, is accepted.
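Expected behaviour, sketched with a few illustrative values (assuming the function above is in scope):
    print(validate_port(8080))     # 8080
    print(validate_port('443'))    # 443
    try:
        validate_port(70000)
    except ValueError as exc:
        print(exc)                 # invalid port: 70000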
|
def default_token_implementation(self, user_id):
user = self.get(user_id)
if not user:
msg = 'No user with such id [{}]'
raise x.JwtNoUser(msg.format(user_id))
if user._token:
try:
self.decode_token(user._token)
return user._token
except jwt.exceptions.ExpiredSignatureError:
pass
from_now = datetime.timedelta(seconds=self.jwt_lifetime)
expires = datetime.datetime.utcnow() + from_now
issued = datetime.datetime.utcnow()
not_before = datetime.datetime.utcnow()
data = dict(
exp=expires,
nbf=not_before,
iat=issued,
user_id=user_id
)
token = jwt.encode(data, self.jwt_secret, algorithm=self.jwt_algo)
string_token = token.decode('utf-8')
user._token = string_token
self.save(user)
return string_token
|
Default JWT token implementation
This is used by default for generating user tokens if custom
implementation was not configured. The token will contain user_id and
expiration date. If you need more information added to the token,
register your custom implementation.
It will load a user to see if token is already on file. If it is, the
existing token will be checked for expiration and returned if valid.
Otherwise a new token will be generated and persisted. This can be used
to perform token revocation.
:param user_id: int, user id
:return: string
|
def get_all_tags_of_offer(self, offer_id):
return self._iterate_through_pages(
get_function=self.get_tags_of_offer_per_page,
resource=OFFER_TAGS,
**{'offer_id': offer_id}
)
|
Get all tags of offer
This will iterate over all pages until it gets all elements.
So if the rate limit is exceeded it will throw an exception and you will get nothing
:param offer_id: the offer id
:return: list
|
def get_state_map(meta_graph, state_ops, unsupported_state_ops,
get_tensor_by_name):
state_map = {}
for node in meta_graph.graph_def.node:
if node.op in state_ops:
tensor_name = node.name + ":0"
tensor = get_tensor_by_name(tensor_name)
num_outputs = len(tensor.op.outputs)
if num_outputs != 1:
raise ValueError("Stateful op %s has %d outputs, expected 1" %
(node.op, num_outputs))
state_map[tensor_name] = tensor
if node.op in unsupported_state_ops:
raise ValueError("Unsupported stateful op: %s" % node.op)
return state_map
|
Returns a map from tensor names to tensors that hold the state.
|
def _void_array_to_list(restuple, _func, _args):
shape = (restuple.e.len, 1)
array_size = np.prod(shape)
mem_size = 8 * array_size
array_str_e = string_at(restuple.e.data, mem_size)
array_str_n = string_at(restuple.n.data, mem_size)
ls_e = np.frombuffer(array_str_e, float, array_size).tolist()
ls_n = np.frombuffer(array_str_n, float, array_size).tolist()
return ls_e, ls_n
|
Convert the FFI result to Python data structures
|
def register(self, event, fn):
self._callbacks.setdefault(event, []).append(fn)
return fn
|
Tell the object to run `fn` whenever a message of type `event` is
received.
|
def safe_stat(path, timeout=1, cmd=None):
"Use threads and a subproc to bodge a timeout on top of filesystem access"
global safe_stat_process
if cmd is None:
cmd = ['/usr/bin/stat']
cmd.append(path)
def target():
global safe_stat_process
logger.debug('Stat thread started')
safe_stat_process = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE)
_results = safe_stat_process.communicate()
logger.debug('Stat thread finished')
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
safe_stat_process.terminate()
thread.join()
return safe_stat_process.returncode == 0
|
Use threads and a subproc to bodge a timeout on top of filesystem access
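An illustrative call (the path is made up; a module-level `logger` and the `PIPE` import are assumed, as in the code above):
    if safe_stat('/mnt/slow-nfs/share', timeout=2):
        print('path is reachable')
    else:
        print('stat failed or hung past the timeout')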
|
def rotateX(self, angle):
rad = angle * math.pi / 180
cosa = math.cos(rad)
sina = math.sin(rad)
y = self.y * cosa - self.z * sina
z = self.y * sina + self.z * cosa
return Point3D(self.x, y, z)
|
Rotates the point around the X axis by the given angle in degrees.
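A quick sanity check, assuming a Point3D(x, y, z) class exposing this method: a 90-degree rotation about X maps (0, 1, 0) to roughly (0, 0, 1).
    p = Point3D(0, 1, 0)
    q = p.rotateX(90)
    print(round(q.y, 6), round(q.z, 6))   # 0.0 1.0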
|
def missing(self, dst):
for inst in six.itervalues(dst):
if inst.status != REMOVED:
inst.status = REMOVED
inst.save()
|
Mark all missing plugins that exist in the database but are not
registered.
|
def open(self):
if self.handle is None:
self.handle = fits.open(self.fname, mode='readonly')
if self.extn:
if len(self.extn) == 1:
hdu = self.handle[self.extn[0]]
else:
hdu = self.handle[self.extn[0],self.extn[1]]
else:
hdu = self.handle[0]
if isinstance(hdu,fits.hdu.compressed.CompImageHDU):
self.compress = True
return hdu
|
Opens the file for subsequent access.
|
def setup_locale(lc_all: str,
first_weekday: int = None,
*,
lc_collate: str = None,
lc_ctype: str = None,
lc_messages: str = None,
lc_monetary: str = None,
lc_numeric: str = None,
lc_time: str = None) -> str:
if first_weekday is not None:
calendar.setfirstweekday(first_weekday)
locale.setlocale(locale.LC_COLLATE, lc_collate or lc_all)
locale.setlocale(locale.LC_CTYPE, lc_ctype or lc_all)
locale.setlocale(locale.LC_MESSAGES, lc_messages or lc_all)
locale.setlocale(locale.LC_MONETARY, lc_monetary or lc_all)
locale.setlocale(locale.LC_NUMERIC, lc_numeric or lc_all)
locale.setlocale(locale.LC_TIME, lc_time or lc_all)
return locale.setlocale(locale.LC_ALL, lc_all)
|
Shortcut helper to setup locale for backend application.
:param lc_all: Locale to use.
:param first_weekday:
Weekday for start week. 0 for Monday, 6 for Sunday. By default: None
:param lc_collate: Collate locale to use. By default: ``<lc_all>``
:param lc_ctype: Ctype locale to use. By default: ``<lc_all>``
:param lc_messages: Messages locale to use. By default: ``<lc_all>``
:param lc_monetary: Monetary locale to use. By default: ``<lc_all>``
:param lc_numeric: Numeric locale to use. By default: ``<lc_all>``
:param lc_time: Time locale to use. By default: ``<lc_all>``
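A hedged usage sketch; the locale names are assumed to be installed on the host, otherwise setlocale raises:
    # Monday becomes the first weekday; LC_TIME uses en_GB, everything else en_US.
    setup_locale('en_US.UTF-8', first_weekday=0, lc_time='en_GB.UTF-8')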
|
def delete(self):
try:
self.revert()
except errors.ChangelistError:
pass
self._connection.run(['change', '-d', str(self._change)])
|
Reverts all files in this changelist then deletes the changelist from perforce
|
def remove_mapping(agent, prefix, ip):
return _broadcast(agent, RemoveMappingManager,
RecordType.record_A, prefix, ip)
|
Removes a mapping with a contract.
It has high latency but gives some kind of guarantee.
|
def register_directory(self, directory, parent, ensure_uniqueness=False):
if ensure_uniqueness:
if self.get_directory_nodes(directory):
raise foundations.exceptions.ProgrammingError("{0} | '{1}' directory is already registered!".format(
self.__class__.__name__, directory))
LOGGER.debug("> Registering '{0}' directory.".format(directory))
row = parent.children_count()
self.beginInsertRows(self.get_node_index(parent), row, row)
directory_node = DirectoryNode(name=os.path.basename(directory),
path=directory,
parent=parent)
self.endInsertRows()
self.directory_registered.emit(directory_node)
return directory_node
|
Registers given directory in the Model.
:param directory: Directory to register.
:type directory: unicode
:param parent: DirectoryNode parent.
:type parent: GraphModelNode
:param ensure_uniqueness: Ensure registrar uniqueness.
:type ensure_uniqueness: bool
:return: DirectoryNode.
:rtype: DirectoryNode
|
def logout(self):
from flask_login import logout_user, current_user
if not current_user.is_authenticated:
return True
user = current_user
events.logout_event.send(user)
logout_user()
app = current_app._get_current_object()
identity_changed.send(app, identity=AnonymousIdentity())
return True
|
Logout user and emit event.
|
def _build(self, inputs, prev_state):
next_state = self._model(prev_state)
return next_state, next_state
|
Connects the ModelRNN module into the graph.
If this is not the first time the module has been connected to the graph,
the Tensors provided as input_ and state must have the same final
dimension, in order for the existing variables to be the correct size for
their corresponding multiplications. The batch size may differ for each
connection.
Args:
inputs: Tensor input to the ModelRNN (ignored).
prev_state: Tensor of size `model.output_size`.
Returns:
output: Tensor of size `model.output_size`.
next_state: Tensor of size `model.output_size`.
|
def clear(self):
for key in self.conn.keys():
self.conn.delete(key)
|
Helper for clearing all the keys in a database. Use with
caution!
|
def push(self, read_time, next_resume_token):
deletes, adds, updates = Watch._extract_changes(
self.doc_map, self.change_map, read_time
)
updated_tree, updated_map, appliedChanges = self._compute_snapshot(
self.doc_tree, self.doc_map, deletes, adds, updates
)
if not self.has_pushed or len(appliedChanges):
key = functools.cmp_to_key(self._comparator)
keys = sorted(updated_tree.keys(), key=key)
self._snapshot_callback(
keys,
appliedChanges,
datetime.datetime.fromtimestamp(read_time.seconds, pytz.utc),
)
self.has_pushed = True
self.doc_tree = updated_tree
self.doc_map = updated_map
self.change_map.clear()
self.resume_token = next_resume_token
|
Assembles a new snapshot from the current set of changes and invokes
the user's callback. Clears the current changes on completion.
|
def node_label_absent(name, node, **kwargs):
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
labels = __salt__['kubernetes.node_labels'](node, **kwargs)
if name not in labels:
ret['result'] = True if not __opts__['test'] else None
ret['comment'] = 'The label does not exist'
return ret
if __opts__['test']:
ret['comment'] = 'The label is going to be deleted'
ret['result'] = None
return ret
__salt__['kubernetes.node_remove_label'](
node_name=node,
label_name=name,
**kwargs)
ret['result'] = True
ret['changes'] = {
'kubernetes.node_label': {
'new': 'absent', 'old': 'present'}}
ret['comment'] = 'Label removed from node'
return ret
|
Ensures that the named label is absent from the node.
name
The name of the label
node
The name of the node
|
def show_wbridges(self):
grp = self.getPseudoBondGroup("Water Bridges-%i" % self.tid, associateWith=[self.model])
grp.lineWidth = 3
for i, wbridge in enumerate(self.plcomplex.waterbridges):
c = grp.newPseudoBond(self.atoms[wbridge.water_id], self.atoms[wbridge.acc_id])
c.color = self.colorbyname('cornflower blue')
self.water_ids.append(wbridge.water_id)
b = grp.newPseudoBond(self.atoms[wbridge.don_id], self.atoms[wbridge.water_id])
b.color = self.colorbyname('cornflower blue')
self.water_ids.append(wbridge.water_id)
if wbridge.protisdon:
self.bs_res_ids.append(wbridge.don_id)
else:
self.bs_res_ids.append(wbridge.acc_id)
|
Visualizes water bridges
|
def get_snapshots_filename(impl, working_dir):
snapshots_filename = impl.get_virtual_chain_name() + ".snapshots"
return os.path.join(working_dir, snapshots_filename)
|
Get the absolute path to the chain's consensus snapshots file.
|
def name_to_system_object(self, name):
if isinstance(name, str):
if self.allow_name_referencing:
name = name
else:
raise NameError('System.allow_name_referencing is set to False, cannot convert string to name')
elif isinstance(name, Object):
name = str(name)
return self.namespace.get(name, None)
|
Give SystemObject instance corresponding to the name
|
def write(self, data):
self._check_can_write()
compressed = self._compressor.compress(data)
self._fp.write(compressed)
self._pos += len(data)
return len(data)
|
Write a bytes object to the file.
Returns the number of uncompressed bytes written, which is
always len(data). Note that due to buffering, the file on disk
may not reflect the data written until close() is called.
|
def get_all_subnets(self, subnet_ids=None, filters=None):
params = {}
if subnet_ids:
self.build_list_params(params, subnet_ids, 'SubnetId')
if filters:
i = 1
for filter in filters:
params[('Filter.%d.Name' % i)] = filter[0]
params[('Filter.%d.Value.1' % i)] = filter[1]
i += 1
return self.get_list('DescribeSubnets', params, [('item', Subnet)])
|
Retrieve information about your Subnets. You can filter results to
return information only about those Subnets that match your search
parameters. Otherwise, all Subnets associated with your account
are returned.
:type subnet_ids: list
:param subnet_ids: A list of strings with the desired Subnet ID's
:type filters: list of tuples
:param filters: A list of tuples containing filters. Each tuple
consists of a filter key and a filter value.
Possible filter keys are:
- *state*, the state of the Subnet
(pending,available)
- *vpcId*, the ID of the VPC the subnet is in.
- *cidrBlock*, CIDR block of the subnet
- *availabilityZone*, the Availability Zone
the subnet is in.
:rtype: list
:return: A list of :class:`boto.vpc.subnet.Subnet`
|
def post(self, request, uri):
uri = self.decode_uri(uri)
data, meta = self.get_post_data(request)
meta['author'] = auth.get_username(request)
node = cio.set(uri, data, publish=False, **meta)
return self.render_to_json(node)
|
Set node data for uri, return rendered content.
JSON Response:
{uri: x, content: y}
|
def copy(self):
if self.object_getattr is Query.object_getattr:
other = Query(self.key)
else:
other = Query(self.key, object_getattr=self.object_getattr)
other.limit = self.limit
other.offset = self.offset
other.offset_key = self.offset_key
other.filters = self.filters
other.orders = self.orders
return other
|
Returns a copy of this query.
|
def create(self):
api = self._instance._client.database_admin_api
metadata = _metadata_with_prefix(self.name)
db_name = self.database_id
if "-" in db_name:
db_name = "`%s`" % (db_name,)
future = api.create_database(
parent=self._instance.name,
create_statement="CREATE DATABASE %s" % (db_name,),
extra_statements=list(self._ddl_statements),
metadata=metadata,
)
return future
|
Create this database within its instance
Includes any configured schema assigned to :attr:`ddl_statements`.
See
https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.database.v1#google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase
:rtype: :class:`~google.api_core.operation.Operation`
:returns: a future used to poll the status of the create request
:raises Conflict: if the database already exists
:raises NotFound: if the instance owning the database does not exist
|
def isPeregrine(self):
return isPeregrine(self.obj.id,
self.obj.sign,
self.obj.signlon)
|
Returns whether this object is peregrine.
|
def _management_form(self):
if self.is_bound:
form = ConcurrentManagementForm(self.data, auto_id=self.auto_id,
prefix=self.prefix)
if not form.is_valid():
raise ValidationError('ManagementForm data is missing or has been tampered with')
else:
form = ConcurrentManagementForm(auto_id=self.auto_id,
prefix=self.prefix,
initial={TOTAL_FORM_COUNT: self.total_form_count(),
INITIAL_FORM_COUNT: self.initial_form_count(),
MAX_NUM_FORM_COUNT: self.max_num},
versions=[(form.instance.pk, get_revision_of_object(form.instance)) for form
in self.initial_forms])
return form
|
Returns the ManagementForm instance for this FormSet.
|
def _match_depth(self, sect, depth):
while depth < sect.depth:
if sect is sect.parent:
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
raise SyntaxError()
|
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
|
def list_firmware_manifests(self, **kwargs):
kwargs = self._verify_sort_options(kwargs)
kwargs = self._verify_filters(kwargs, FirmwareManifest, True)
api = self._get_api(update_service.DefaultApi)
return PaginatedResponse(api.firmware_manifest_list, lwrap_type=FirmwareManifest, **kwargs)
|
List all manifests.
:param int limit: number of manifests to retrieve
:param str order: sort direction of manifests when ordered by time. 'desc' or 'asc'
:param str after: get manifests after given `image_id`
:param dict filters: Dictionary of filters to apply
:return: list of :py:class:`FirmwareManifest` objects
:rtype: PaginatedResponse
|
def _consent_registration(self, consent_args):
jws = JWS(json.dumps(consent_args), alg=self.signing_key.alg).sign_compact([self.signing_key])
request = "{}/creq/{}".format(self.api_url, jws)
res = requests.get(request)
if res.status_code != 200:
raise UnexpectedResponseError("Consent service error: %s %s", res.status_code, res.text)
return res.text
|
Register a request at the consent service
:type consent_args: dict
:rtype: str
:param consent_args: All necessary parameters for the consent request
:return: Ticket received from the consent service
|
def container_exists(self, id=None, name=None):
exists = False
if id and self.container_by_id(id):
exists = True
elif name and self.container_by_name(name):
exists = True
return exists
|
Checks if container exists already
|
def _load_dataframe(self, resource_name):
try:
import pandas
except ImportError:
raise RuntimeError('To enable dataframe support, '
'run \'pip install datadotworld[pandas]\'')
tabular_resource = self.__tabular_resources[resource_name]
field_dtypes = fields_to_dtypes(tabular_resource.descriptor['schema'])
try:
return pandas.read_csv(
path.join(
self.__base_path,
tabular_resource.descriptor['path']),
dtype=field_dtypes['other'],
parse_dates=list(field_dtypes['dates'].keys()),
infer_datetime_format=True)
except ValueError as e:
warnings.warn(
'Unable to set data frame dtypes automatically using {} '
'schema. Data types may need to be adjusted manually. '
'Error: {}'.format(resource_name, e))
return pandas.read_csv(
path.join(
self.__base_path,
tabular_resource.descriptor['path']))
|
Build pandas.DataFrame from resource data
Lazy load any optional dependencies in order to allow users to
use package without installing pandas if so they wish.
:param resource_name:
|
def _build_query_string(q, default_field=None, default_operator='AND'):
def _is_phrase_search(query_string):
clean_query = query_string.strip()
return clean_query and clean_query.startswith('"') and clean_query.endswith('"')
def _get_phrase(query_string):
return query_string.strip().strip('"')
if _is_phrase_search(q):
query = {'match_phrase': {'_all': _get_phrase(q)}}
else:
query = {'query_string': {'query': q, 'default_operator': default_operator}}
query['query_string'].update({'lenient': False} if default_field else {'default_field': default_field})
return query
|
Build ``query_string`` object from ``q``.
:param q: q of type String
:param default_field: default_field
:return: dictionary object.
|
def _comic_archive_write_zipfile(new_filename, tmp_dir):
if Settings.verbose:
print('Rezipping archive', end='')
with zipfile.ZipFile(new_filename, 'w',
compression=zipfile.ZIP_DEFLATED) as new_zf:
root_len = len(os.path.abspath(tmp_dir))
for r_d_f in os.walk(tmp_dir):
root = r_d_f[0]
filenames = r_d_f[2]
archive_root = os.path.abspath(root)[root_len:]
for fname in filenames:
fullpath = os.path.join(root, fname)
archive_name = os.path.join(archive_root, fname)
if Settings.verbose:
print('.', end='')
new_zf.write(fullpath, archive_name, zipfile.ZIP_DEFLATED)
|
Zip up the files in the tempdir into the new filename.
|
def read_transport(self):
if ('r' not in self.access_type):
raise BTIncompatibleTransportAccessType
return self.codec.decode(self.fd, self.read_mtu)
|
Read data from media transport.
The returned data payload is SBC decoded and has
all RTP encapsulation removed.
:return data: Payload data that has been decoded,
with RTP encapsulation removed.
:rtype: array{byte}
|
def add_options(self, path: str, handler: _WebHandler,
**kwargs: Any) -> AbstractRoute:
return self.add_route(hdrs.METH_OPTIONS, path, handler, **kwargs)
|
Shortcut for add_route with method OPTIONS
|
def build_model_input(cls, name='input'):
return cls(name, PortDirection.INPUT, type=PortType.MODEL)
|
Build a model input port.
:param name: port name
:type name: str
:return: port object
:rtype: PortDef
|
def format_hsl(hsl_color):
hue, saturation, lightness = hsl_color
return 'hsl({}, {:.2%}, {:.2%})'.format(hue, saturation, lightness)
|
Format hsl color as css color string.
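For example (saturation and lightness are fractions, rendered as percentages):
    print(format_hsl((120, 0.5, 0.25)))   # hsl(120, 50.00%, 25.00%)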
|
def results(self, Pc):
Psatn = self['pore.invasion_pressure'] <= Pc
Tsatn = self['throat.invasion_pressure'] <= Pc
inv_phase = {}
inv_phase['pore.occupancy'] = sp.array(Psatn, dtype=float)
inv_phase['throat.occupancy'] = sp.array(Tsatn, dtype=float)
return inv_phase
|
r"""
This method determines which pores and throats are filled with invading
phase at the specified capillary pressure, and creates several arrays
indicating the occupancy status of each pore and throat for the given
pressure.
Parameters
----------
Pc : scalar
The capillary pressure for which an invading phase configuration
is desired.
Returns
-------
A dictionary containing an assortment of data about distribution
of the invading phase at the specified capillary pressure. The data
include:
**'pore.occupancy'** : A value between 0 and 1 indicating the
fractional volume of each pore that is invaded. If no late pore
filling model was applied, then this will only be integer values
(either filled or not).
**'throat.occupancy'** : The same as 'pore.occupancy' but for throats.
This dictionary can be passed directly to the ``update`` method of
the *Phase* object. These values can then be accessed by models
or algorithms.
|
def downloads_per_day(self):
count, num_days = self._downloads_for_num_days(7)
res = ceil(count / num_days)
logger.debug("Downloads per day = (%d / %d) = %d", count, num_days, res)
return res
|
Return the number of downloads per day, averaged over the past 7 days
of data.
:return: average number of downloads per day
:rtype: int
|
def _calculate_weights(self, this_samples, N):
this_weights = self.weights.append(N)[:,0]
if self.target_values is None:
for i in range(N):
tmp = self.target(this_samples[i]) - self.proposal.evaluate(this_samples[i])
this_weights[i] = _exp(tmp)
else:
this_target_values = self.target_values.append(N)
for i in range(N):
this_target_values[i] = self.target(this_samples[i])
tmp = this_target_values[i] - self.proposal.evaluate(this_samples[i])
this_weights[i] = _exp(tmp)
|
Calculate and save the weights of a run.
|