Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
21,100 | def _cumsum(group_idx, a, size, fill_value=None, dtype=None):
sortidx = np.argsort(group_idx, kind='mergesort')
invsortidx = np.argsort(sortidx, kind='mergesort')
group_idx_srt = group_idx[sortidx]
a_srt = a[sortidx]
a_srt_cumsum = np.cumsum(a_srt, dtype=dtype)
increasing = np.arange(len(a), dtype=int)
group_starts = _min(group_idx_srt, increasing, size, fill_value=0)[group_idx_srt]
a_srt_cumsum += -a_srt_cumsum[group_starts] + a_srt[group_starts]
return a_srt_cumsum[invsortidx] | N to N aggregate operation of cumsum. Perform cumulative sum for each group.
>>> group_idx = np.array([4, 3, 3, 4, 4, 1, 1, 1, 7, 8, 7, 4, 3, 3, 1, 1])
>>> a = np.array([3, 4, 1, 3, 9, 9, 6, 7, 7, 0, 8, 2, 1, 8, 9, 8])
>>> _cumsum(group_idx, a, np.max(group_idx) + 1)
array([ 3, 4, 5, 6, 15, 9, 15, 22, 7, 0, 15, 17, 6, 14, 31, 39]) |
21,101 | def get(self, sid):
return VerificationContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) | Constructs a VerificationContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.verify.v2.service.verification.VerificationContext
:rtype: twilio.rest.verify.v2.service.verification.VerificationContext |
21,102 | def is_possible_type(
self, abstract_type: GraphQLAbstractType, possible_type: GraphQLObjectType
) -> bool:
possible_type_map = self._possible_type_map
try:
possible_type_names = possible_type_map[abstract_type.name]
except KeyError:
possible_types = self.get_possible_types(abstract_type)
possible_type_names = {type_.name for type_ in possible_types}
possible_type_map[abstract_type.name] = possible_type_names
return possible_type.name in possible_type_names | Check whether a concrete type is possible for an abstract type. |
21,103 | def setup_random_seed(seed):
if seed == -1:
seed = np.random.randint(0,
int(1e9))
np.random.seed(seed) | Setup the random seed. If the input seed is -1, the code will use a random seed for every run. If it is
positive, that seed is used for all runs, thereby giving reproducible results.
Parameters
----------
seed : int
The seed of the random number generator. |
21,104 | def pickle_dict(items):
ret = {}
pickled_keys = []
for key, value in items.items():
if isinstance(value, basestring):
ret[key] = value
else:
ret[key] = pickle.dumps(value)
pickled_keys.append(key)
ret['_pickled'] = ','.join(pickled_keys)
return ret | Returns a new dictionary where values which aren't instances of
basestring are pickled. Also, a new key '_pickled' contains a comma
separated list of keys corresponding to the pickled values. |
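A minimal usage sketch for the reconstructed `pickle_dict` above, assuming the Python 2 `basestring`/`pickle` semantics the snippet targets; the `unpickle_dict` helper shown here is a hypothetical inverse, not part of the source:

```python
import pickle

def unpickle_dict(items):
    # Hypothetical inverse of pickle_dict: restore the values listed in '_pickled'.
    ret = dict(items)
    for key in ret.pop('_pickled', '').split(','):
        if key:
            ret[key] = pickle.loads(ret[key])
    return ret

data = {'name': 'alice', 'scores': [1, 2, 3]}
packed = pickle_dict(data)        # 'scores' is pickled; packed['_pickled'] == 'scores'
restored = unpickle_dict(packed)  # round-trips back to the original dict
assert restored == data
```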
21,105 | def parse_string(self, string):
self.log.info("Parsing ASCII data")
if not string:
self.log.warning("Empty metadata")
return
lines = string.splitlines()
application_data = []
application = lines[0].split()[0]
self.log.debug("Reading meta information for " % application)
for line in lines:
if application is None:
self.log.debug(
"Reading meta information for " % application
)
application = line.split()[0]
application_data.append(line)
if line.startswith(application + b):
self._record_app_data(application_data)
application_data = []
application = None | Parse ASCII output of JPrintMeta |
21,106 | def execute(self, method, *args, **kargs):
result = None
for i in range(0, 10):
try:
method_map = {
'get_lead_by_id': self.get_lead_by_id,
'get_multiple_leads_by_filter_type': self.get_multiple_leads_by_filter_type,
'get_multiple_leads_by_list_id': self.get_multiple_leads_by_list_id,
'get_multiple_leads_by_list_id_yield': self.get_multiple_leads_by_list_id_yield,
'get_multiple_leads_by_program_id': self.get_multiple_leads_by_program_id,
'get_multiple_leads_by_program_id_yield': self.get_multiple_leads_by_program_id_yield,
'change_lead_program_status': self.change_lead_program_status,
'create_update_leads': self.create_update_leads,
'associate_lead': self.associate_lead,
'push_lead': self.push_lead,
'merge_lead': self.merge_lead,
'get_lead_partitions': self.get_lead_partitions,
'create_list': self.create_list,
'update_list': self.update_list,
'delete_list': self.delete_list,
'get_list_by_id': self.get_list_by_id,
'get_list_by_name': self.get_list_by_name,
'get_multiple_lists': self.get_multiple_lists,
'browse_lists': self.browse_lists,
'add_leads_to_list': self.add_leads_to_list,
'remove_leads_from_list': self.remove_leads_from_list,
'member_of_list': self.member_of_list,
'get_campaign_by_id': self.get_campaign_by_id,
'get_multiple_campaigns': self.get_multiple_campaigns,
'schedule_campaign': self.schedule_campaign,
'request_campaign': self.request_campaign,
'import_lead': self.import_lead,
'get_import_lead_status': self.get_import_lead_status,
'get_import_failure_file': self.get_import_failure_file,
'get_import_warning_file': self.get_import_warning_file,
'describe': self.describe,
'get_activity_types': self.get_activity_types,
'get_paging_token': self.get_paging_token,
'get_lead_activities': self.get_lead_activities,
'get_lead_activities_yield': self.get_lead_activities_yield,
'get_lead_changes': self.get_lead_changes,
'get_lead_changes_yield': self.get_lead_changes_yield,
'add_custom_activities': self.add_custom_activities,
'get_daily_usage': self.get_daily_usage,
'get_last_7_days_usage': self.get_last_7_days_usage,
'get_daily_errors': self.get_daily_errors,
'get_last_7_days_errors': self.get_last_7_days_errors,
'delete_lead': self.delete_lead,
'get_deleted_leads': self.get_deleted_leads,
'update_leads_partition': self.update_leads_partition,
'create_folder': self.create_folder,
'get_folder_by_id': self.get_folder_by_id,
'get_folder_by_name': self.get_folder_by_name,
'get_folder_contents': self.get_folder_contents,
'update_folder': self.update_folder,
'delete_folder': self.delete_folder,
'browse_folders': self.browse_folders,
'create_token': self.create_token,
'get_tokens': self.get_tokens,
'delete_tokens': self.delete_tokens,
'create_email_template': self.create_email_template,
'get_email_template_by_id': self.get_email_template_by_id,
'get_email_template_by_name': self.get_email_template_by_name,
'update_email_template': self.update_email_template,
'delete_email_template': self.delete_email_template,
'get_email_templates': self.get_email_templates,
'get_email_templates_yield': self.get_email_templates_yield,
'get_email_template_content': self.get_email_template_content,
'update_email_template_content': self.update_email_template_content,
'approve_email_template': self.approve_email_template,
'unapprove_email_template': self.unapprove_email_template,
'discard_email_template_draft': self.discard_email_template_draft,
'clone_email_template': self.clone_email_template,
'create_email': self.create_email,
'get_email_by_id': self.get_email_by_id,
'get_email_by_name': self.get_email_by_name,
'delete_email': self.delete_email,
'update_email': self.update_email,
'get_emails': self.get_emails,
'get_emails_yield': self.get_emails_yield,
'get_email_content': self.get_email_content,
'update_email_content': self.update_email_content,
'update_email_content_in_editable_section': self.update_email_content_in_editable_section,
'get_email_dynamic_content': self.get_email_dynamic_content,
'update_email_dynamic_content': self.update_email_dynamic_content,
'approve_email': self.approve_email,
'unapprove_email': self.unapprove_email,
'discard_email_draft': self.discard_email_draft,
'clone_email': self.clone_email,
'send_sample_email': self.send_sample_email,
'get_email_full_content': self.get_email_full_content,
'create_landing_page': self.create_landing_page,
'get_landing_page_by_id': self.get_landing_page_by_id,
'get_landing_page_by_name': self.get_landing_page_by_name,
'delete_landing_page': self.delete_landing_page,
'update_landing_page': self.update_landing_page,
'get_landing_pages': self.get_landing_pages,
'get_landing_pages_yield': self.get_landing_pages_yield,
'get_landing_page_content': self.get_landing_page_content,
'create_landing_page_content_section': self.create_landing_page_content_section,
'update_landing_page_content_section': self.update_landing_page_content_section,
'delete_landing_page_content_section': self.delete_landing_page_content_section,
'get_landing_page_dynamic_content': self.get_landing_page_dynamic_content,
'update_landing_page_dynamic_content': self.update_landing_page_dynamic_content,
'approve_landing_page': self.approve_landing_page,
'unapprove_landing_page': self.unapprove_landing_page,
'discard_landing_page_draft': self.discard_landing_page_draft,
'clone_landing_page': self.clone_landing_page,
'create_form': self.create_form,
'get_form_by_id': self.get_form_by_id,
'get_form_by_name': self.get_form_by_name,
'delete_form': self.delete_form,
'update_form': self.update_form,
'get_forms': self.get_forms,
'get_forms_yield': self.get_forms_yield,
'get_form_fields': self.get_form_fields,
'create_form_field': self.create_form_field,
'update_form_field': self.update_form_field,
'delete_form_field': self.delete_form_field,
'approve_form': self.approve_form,
'unapprove_form': self.unapprove_form,
'discard_form_draft': self.discard_form_draft,
'clone_form': self.clone_form,
'create_file': self.create_file,
'get_file_by_id': self.get_file_by_id,
'get_file_by_name': self.get_file_by_name,
'list_files': self.list_files,
'get_files_yield': self.get_files_yield,
'update_file_content': self.update_file_content,
'create_snippet': self.create_snippet,
'get_snippet_by_id': self.get_snippet_by_id,
'delete_snippet': self.delete_snippet,
'update_snippet': self.update_snippet,
'get_snippets': self.get_snippets,
'get_snippets_yield': self.get_snippets_yield,
'get_snippet_content': self.get_snippet_content,
'update_snippet_content': self.update_snippet_content,
'approve_snippet': self.approve_snippet,
'unapprove_snippet': self.unapprove_snippet,
'discard_snippet_draft': self.discard_snippet_draft,
'clone_snippet': self.clone_snippet,
'update_snippet_dynamic_content': self.update_snippet_dynamic_content,
'get_snippet_dynamic_content': self.get_snippet_dynamic_content,
'get_segmentations': self.get_segmentations,
'get_segments': self.get_segments,
'create_landing_page_template': self.create_landing_page_template,
'get_landing_page_template_by_id': self.get_landing_page_template_by_id,
'get_landing_page_template_by_name': self.get_landing_page_template_by_name,
'get_landing_page_templates': self.get_landing_page_templates,
'get_landing_page_templates_yield': self.get_landing_page_templates_yield,
'get_landing_page_template_content': self.get_landing_page_template_content,
'update_landing_page_template_content': self.update_landing_page_template_content,
'update_landing_page_template': self.update_landing_page_template,
'delete_landing_page_template': self.delete_landing_page_template,
'approve_landing_page_template': self.approve_landing_page_template,
'unapprove_landing_page_template': self.unapprove_landing_page_template,
'discard_landing_page_template_draft': self.discard_landing_page_template_draft,
'clone_landing_page_template': self.clone_landing_page_template,
'create_program': self.create_program,
'get_program_by_id': self.get_program_by_id,
'get_program_by_name': self.get_program_by_name,
'get_program_by_tag_type': self.get_program_by_tag_type,
'update_program': self.update_program,
'delete_program': self.delete_program,
'browse_programs': self.browse_programs,
'get_programs_yield': self.get_programs_yield,
'clone_program': self.clone_program,
'approve_program': self.approve_program,
'unapprove_program': self.unapprove_program,
'get_channels': self.get_channels,
'get_channel_by_name': self.get_channel_by_name,
'get_tags': self.get_tags,
'get_tag_by_name': self.get_tag_by_name,
'get_list_of_custom_objects': self.get_list_of_custom_objects,
'describe_custom_object': self.describe_custom_object,
'create_update_custom_objects': self.create_update_custom_objects,
'delete_custom_objects': self.delete_custom_objects,
'get_custom_objects': self.get_custom_objects,
'describe_opportunity': self.describe_opportunity,
'create_update_opportunities': self.create_update_opportunities,
'delete_opportunities': self.delete_opportunities,
'get_opportunities': self.get_opportunities,
'describe_opportunity_role': self.describe_opportunity_role,
'create_update_opportunities_roles': self.create_update_opportunities_roles,
'delete_opportunity_roles': self.delete_opportunity_roles,
'get_opportunity_roles': self.get_opportunity_roles,
'describe_company': self.describe_company,
'create_update_companies': self.create_update_companies,
'delete_companies': self.delete_companies,
'get_companies': self.get_companies,
'describe_sales_person': self.describe_sales_person,
'create_update_sales_persons': self.create_update_sales_persons,
'delete_sales_persons': self.delete_sales_persons,
'get_sales_persons': self.get_sales_persons,
'get_custom_activity_types': self.get_custom_activity_types,
'describe_custom_activity_type': self.describe_custom_activity_type,
'create_custom_activity_type': self.create_custom_activity_type,
'update_custom_activity_type': self.update_custom_activity_type,
'approve_custom_activity_type': self.approve_custom_activity_type,
'create_custom_activity_type_attribute': self.create_custom_activity_type_attribute,
'discard_custom_activity_type_draft': self.discard_custom_activity_type_draft,
'delete_custom_activity_type': self.delete_custom_activity_type,
'update_custom_activity_type_attribute': self.update_custom_activity_type_attribute,
'delete_custom_activity_type_attribute': self.delete_custom_activity_type_attribute,
'get_leads_export_jobs_list': self.get_leads_export_jobs_list,
'get_activities_export_jobs_list': self.get_activities_export_jobs_list,
'create_leads_export_job': self.create_leads_export_job,
'create_activities_export_job': self.create_activities_export_job,
'enqueue_leads_export_job': self.enqueue_leads_export_job,
'enqueue_activities_export_job': self.enqueue_activities_export_job,
'cancel_leads_export_job': self.cancel_leads_export_job,
'cancel_activities_export_job': self.cancel_activities_export_job,
'get_leads_export_job_status': self.get_leads_export_job_status,
'get_activities_export_job_status': self.get_activities_export_job_status,
'get_leads_export_job_file': self.get_leads_export_job_file,
'get_activities_export_job_file': self.get_activities_export_job_file
}
result = method_map[method](*args, **kargs)
except MarketoException as e:
if e.code in ['601', '602']:
self.authenticate()
continue
else:
raise Exception({'message': e.message, 'code': e.code})
break
return result | max 10 rechecks |
21,107 | async def search_participant(self, name, force_update=False):
if force_update or self.participants is None:
await self.get_participants()
if self.participants is not None:
for p in self.participants:
if p.name == name:
return p
return None | search a participant by (display) name
|methcoro|
Args:
name: display name of the participant
force_update (default=False): True to force an update to the Challonge API
Returns:
Participant: None if not found
Raises:
APIException |
21,108 | def shape(self):
shp = (self.ds.RasterYSize, self.ds.RasterXSize, self.ds.RasterCount)
return shp[:2] if shp[2] <= 1 else shp | Returns a tuple of row, column, (band count if multidimensional). |
21,109 | def default_start():
(config, daemon, pidfile, startup, fork) = parsearg()
if config is None:
if os.path.isfile('/etc/vlcp.conf'):
config = '/etc/vlcp.conf'
else:
print()
elif not config:
config = None
main(config, startup, daemon, pidfile, fork) | Use `sys.argv` for starting parameters. This is the entry-point of `vlcp-start` |
21,110 | def follow(ctx, nick, url, force):
source = Source(nick, url)
sources = ctx.obj['conf'].following
if not force:
if source.nick in (source.nick for source in sources):
click.confirm("➤ You’re already following {0}. Overwrite?".format(
click.style(source.nick, bold=True)), default=False, abort=True)
_, status = get_remote_status([source])[0]
if not status or status.status_code != 200:
click.confirm("➤ The feed of {0} at {1} is not available. Follow anyway?".format(
click.style(source.nick, bold=True),
click.style(source.url, bold=True)), default=False, abort=True)
ctx.obj['conf'].add_source(source)
click.echo("✓ You’re now following {0}.".format(
click.style(source.nick, bold=True))) | Add a new source to your followings. |
21,111 | def fetchall(self, mode=5, after=0, parent='any', order_by='id',
limit=100, page=0, asc=1):
fields_comments = ['tid', 'id', 'parent', 'created', 'modified',
'mode', 'remote_addr', 'text', 'author',
'email', 'website', 'likes', 'dislikes']
fields_threads = ['uri', 'title']
sql_comments_fields = ', '.join(['comments.' + f
for f in fields_comments])
sql_threads_fields = ', '.join(['threads.' + f
for f in fields_threads])
sql = ['SELECT ' + sql_comments_fields + ', ' + sql_threads_fields + ' '
'FROM comments INNER JOIN threads ON comments.tid=threads.id WHERE comments.mode = ? ']
sql_args = [mode]
if parent != 'any':
if parent is None:
sql.append('AND comments.parent IS NULL')
else:
sql.append('AND comments.parent=?')
sql_args.append(parent)
if order_by not in ['id', 'created', 'modified', 'likes', 'dislikes', 'karma']:
sql.append('ORDER BY ')
sql.append("comments.created")
if not asc:
sql.append(' DESC')
else:
sql.append('ORDER BY ')
sql.append('comments.' + order_by)
if not asc:
sql.append(' DESC')
sql.append(", comments.created")
if limit:
sql.append('LIMIT ?,?')
sql_args.append(page * limit)
sql_args.append(limit)
rv = self.db.execute(sql, sql_args).fetchall()
for item in rv:
yield dict(zip(fields_comments + fields_threads, item)) | Return comments for admin with :param:`mode`. |
21,112 | def type_validator(validator, types, instance, schema):
if schema.get('type') == 'File':
return []
return _validators.type_draft3(validator, types, instance, schema) | Swagger 1.2 supports parameters of 'type': 'File'. Skip validation of
the 'type' field in this case. |
21,113 | def get_fallback_resolution(self):
ppi = ffi.new('double[2]')
cairo.cairo_surface_get_fallback_resolution(
self._pointer, ppi + 0, ppi + 1)
return tuple(ppi) | Returns the previous fallback resolution
set by :meth:`set_fallback_resolution`,
or default fallback resolution if never set.
:returns: ``(x_pixels_per_inch, y_pixels_per_inch)`` |
21,114 | def as_xml_index(self, basename="/tmp/sitemap.xml"):
num_parts = self.requires_multifile()
if (not num_parts):
raise ListBaseIndexError(
"Request for sitemapindex for list with only %d entries when max_sitemap_entries is set to %s" %
(len(self), str(
self.max_sitemap_entries)))
index = ListBase()
index.sitemapindex = True
index.capability_name = self.capability_name
index.default_capability()
for n in range(num_parts):
r = Resource(uri=self.part_name(basename, n))
index.add(r)
return(index.as_xml()) | Return a string of the index for a large list that is split.
All we need to do is determine the number of component sitemaps there
will be and generate their URIs based on a pattern.
Q - should there be a flag to select generation of each component sitemap
in order to calculate the md5sum?
Q - what timestamp should be used? |
21,115 | def find():
spark_home = os.environ.get('SPARK_HOME', None)
if not spark_home:
for path in [
,
,
,
,
]:
if os.path.exists(path):
spark_home = path
break
if not spark_home:
raise ValueError("Couldn't find Spark, make sure SPARK_HOME env is set"
" or Spark is in an expected location (e.g. from homebrew installation).")
return spark_home | Find a local spark installation.
Will first check the SPARK_HOME env variable, and otherwise
search common installation locations, e.g. from homebrew |
21,116 | def parse_timespan_value(s):
number, unit = split_number_and_unit(s)
if not unit or unit == "s":
return number
elif unit == "min":
return number * 60
elif unit == "h":
return number * 60 * 60
elif unit == "d":
return number * 24 * 60 * 60
else:
raise ValueError('unknown unit: {}'.format(unit)) | Parse a string that contains a time span, optionally with a unit like s.
@return the number of seconds encoded by the string |
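A small sketch of how the timespan parser above behaves; `split_number_and_unit` is provided elsewhere in the library, so a stand-in version is assumed here purely to make the example self-contained:

```python
import re

def split_number_and_unit(s):
    # Stand-in helper (assumption): split "90min" -> (90, "min").
    match = re.fullmatch(r'\s*(\d+)\s*([a-z]*)\s*', s)
    if not match:
        raise ValueError('invalid timespan: {}'.format(s))
    return int(match.group(1)), match.group(2)

assert parse_timespan_value('90') == 90        # bare number -> seconds
assert parse_timespan_value('2min') == 120     # minutes
assert parse_timespan_value('1h') == 3600      # hours
assert parse_timespan_value('2d') == 172800    # days
```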
21,117 | def adjustPoolSize(self, newsize):
if newsize < 0:
raise ValueError("pool size must be nonnegative")
self.log("Adjust pool size from %d to %d." % (self.target_pool_size, newsize))
self.target_pool_size = newsize
self.kill_excess_pending_conns()
self.kill_excess_conns()
self.fill_pool() | Change the target pool size. If we have too many connections already,
ask some to finish what they're doing and die (preferring to kill
connections to the node that already has the most connections). If
we have too few, create more. |
21,118 | def parse_segment(text, version=None, encoding_chars=None, validation_level=None, reference=None):
version = _get_version(version)
encoding_chars = _get_encoding_chars(encoding_chars, version)
validation_level = _get_validation_level(validation_level)
segment_name = text[:3]
text = text[4:] if segment_name != 'MSH' else text[3:]
segment = Segment(segment_name, version=version, validation_level=validation_level,
reference=reference)
segment.children = parse_fields(text, segment_name, version, encoding_chars, validation_level,
segment.structure_by_name, segment.allow_infinite_children)
return segment | Parse the given ER7-encoded segment and return an instance of :class:`Segment <hl7apy.core.Segment>`.
:type text: ``str``
:param text: the ER7-encoded string containing the segment to be parsed
:type version: ``str``
:param version: the HL7 version (e.g. "2.5"), or ``None`` to use the default
(see :func:`set_default_version <hl7apy.set_default_version>`)
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`set_default_encoding_chars <hl7apy.set_default_encoding_chars>`)
:type validation_level: ``int``
:param validation_level: the validation level. Possible values are those defined in
:class:`VALIDATION_LEVEL <hl7apy.consts.VALIDATION_LEVEL>` class or ``None`` to use the default
validation level (see :func:`set_default_validation_level <hl7apy.set_default_validation_level>`)
:type reference: ``dict``
:param reference: a dictionary containing the element structure returned by
:func:`load_reference <hl7apy.load_reference>`, :func:`find_reference <hl7apy.find_reference>` or
belonging to a message profile
:return: an instance of :class:`Segment <hl7apy.core.Segment>`
>>> segment = "EVN||20080115153000||||20080114003000"
>>> s = parse_segment(segment)
>>> print(s)
<Segment EVN>
>>> print(s.to_er7())
EVN||20080115153000||||20080114003000 |
21,119 | def depth_profile(list_, max_depth=None, compress_homogenous=True, compress_consecutive=False, new_depth=False):
if isinstance(list_, dict):
list_ = list(list_.values())
level_shape_list = []
if not any(map(util_type.is_listlike, list_)):
return len(list_)
if False and new_depth:
pass
else:
for item in list_:
if isinstance(item, dict):
item = list(item.values())
if util_type.is_listlike(item):
if max_depth is None:
level_shape_list.append(depth_profile(item, None))
else:
if max_depth >= 0:
level_shape_list.append(depth_profile(item, max_depth - 1))
else:
level_shape_list.append(str(len(item)))
else:
level_shape_list.append(1)
if compress_homogenous:
if allsame(level_shape_list):
dim_ = level_shape_list[0]
len_ = len(level_shape_list)
if isinstance(dim_, tuple):
level_shape_list = tuple([len_] + list(dim_))
else:
level_shape_list = tuple([len_, dim_])
if compress_consecutive:
hash_list = list(map(hash, map(str, level_shape_list)))
consec_list = group_consecutives(hash_list, 0)
if len(consec_list) != len(level_shape_list):
len_list = list(map(len, consec_list))
cumsum_list = np.cumsum(len_list)
consec_str = '['
thresh = 1
for len_, cumsum in zip(len_list, cumsum_list):
value = level_shape_list[cumsum - 1]
if len_ > thresh:
consec_str += str(value) + '] * ' + str(len_)
consec_str += ' + ['
else:
consec_str += str(value) + ', '
if consec_str.endswith(', '):
consec_str = consec_str[:-2]
consec_str += ']'
level_shape_list = consec_str
return level_shape_list | r"""
Returns a nested list corresponding the shape of the nested structures
lists represent depth, tuples represent shape. The values of the items do
not matter. only the lengths.
Args:
list_ (list):
max_depth (None):
compress_homogenous (bool):
compress_consecutive (bool): experimental
CommandLine:
python -m utool.util_list --test-depth_profile
Setup:
>>> from utool.util_list import * # NOQA
Example0:
>>> # ENABLE_DOCTEST
>>> list_ = [[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
>>> result = depth_profile(list_)
>>> print(result)
(2, 3, 4)
Example1:
>>> # ENABLE_DOCTEST
>>> list_ = [[[[[1]]], [3, 4, 33]], [[1], [2, 3], [4, [5, 5]]], [1, 3]]
>>> result = depth_profile(list_)
>>> print(result)
[[(1, 1, 1), 3], [1, 2, [1, 2]], 2]
Example2:
>>> # ENABLE_DOCTEST
>>> list_ = [[[[[1]]], [3, 4, 33]], [[1], [2, 3], [4, [5, 5]]], [1, 3]]
>>> result = depth_profile(list_, max_depth=1)
>>> print(result)
[[(1, '1'), 3], [1, 2, [1, '2']], 2]
Example3:
>>> # ENABLE_DOCTEST
>>> list_ = [[[1, 2], [1, 2, 3]], None]
>>> result = depth_profile(list_, compress_homogenous=True)
>>> print(result)
[[2, 3], 1]
Example4:
>>> # ENABLE_DOCTEST
>>> list_ = [[3, 2], [3, 2], [3, 2], [3, 2], [3, 2], [3, 2], [9, 5, 3], [2, 2]]
>>> result = depth_profile(list_, compress_homogenous=True, compress_consecutive=True)
>>> print(result)
[2] * 6 + [3, 2]
Example5:
>>> # ENABLE_DOCTEST
>>> list_ = [[[3, 9], 2], [[3, 9], 2], [[3, 9], 2], [[3, 9], 2]] #, [3, 2], [3, 2]]
>>> result = depth_profile(list_, compress_homogenous=True, compress_consecutive=True)
>>> print(result)
(4, [2, 1])
Example6:
>>> # ENABLE_DOCTEST
>>> list_ = [[[[1, 2]], [1, 2]], [[[1, 2]], [1, 2]], [[[0, 2]], [1]]]
>>> result1 = depth_profile(list_, compress_homogenous=True, compress_consecutive=False)
>>> result2 = depth_profile(list_, compress_homogenous=True, compress_consecutive=True)
>>> result = str(result1) + '\n' + str(result2)
>>> print(result)
[[(1, 2), 2], [(1, 2), 2], [(1, 2), 1]]
[[(1, 2), 2]] * 2 + [[(1, 2), 1]]
Example7:
>>> # ENABLE_DOCTEST
>>> list_ = [[{'a': [1, 2], 'b': [3, 4, 5]}, [1, 2, 3]], None]
>>> result = depth_profile(list_, compress_homogenous=True)
>>> print(result)
Example8:
>>> # ENABLE_DOCTEST
>>> list_ = [[[1]], [[[1, 1], [1, 1]]], [[[[1, 3], 1], [[1, 3, 3], 1, 1]]]]
>>> result = depth_profile(list_, compress_homogenous=True)
>>> print(result)
Example9:
>>> # ENABLE_DOCTEST
>>> list_ = []
>>> result = depth_profile(list_)
>>> print(result)
# THIS IS AN ERROR???
SHOULD BE
#[1, 1], [1, 2, 2], (1, ([1, 2]), (
Example10:
>>> # ENABLE_DOCTEST
>>> fm1 = [[0, 0], [0, 0]]
>>> fm2 = [[0, 0], [0, 0], [0, 0]]
>>> fm3 = [[0, 0], [0, 0], [0, 0], [0, 0]]
>>> list_ = [0, 0, 0]
>>> list_ = [fm1, fm2, fm3]
>>> max_depth = 0
>>> new_depth = True
>>> result = depth_profile(list_, max_depth=max_depth, new_depth=new_depth)
>>> print(result) |
21,120 | def bs_values_df(run_list, estimator_list, estimator_names, n_simulate,
**kwargs):
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {'desc': 'bs values'})
assert len(estimator_list) == len(estimator_names), (
'len(estimator_list) = {0} != len(estimator_names) = {1}'.format(len(estimator_list), len(estimator_names)))
bs_values_list = pu.parallel_apply(
nestcheck.error_analysis.run_bootstrap_values, run_list,
func_args=(estimator_list,), func_kwargs={'n_simulate': n_simulate},
tqdm_kwargs=tqdm_kwargs, **kwargs)
df = pd.DataFrame()
for i, name in enumerate(estimator_names):
df[name] = [arr[i, :] for arr in bs_values_list]
for vals_shape in df.loc[0].apply(lambda x: x.shape).values:
assert vals_shape == (n_simulate,), (
'Should be n_simulate=' + str(n_simulate) + ', ' +
'but bootstrap values have shape=' +
str(vals_shape))
return df | Computes a data frame of bootstrap resampled values.
Parameters
----------
run_list: list of dicts
List of nested sampling run dicts.
estimator_list: list of functions
Estimators to apply to runs.
estimator_names: list of strs
Name of each func in estimator_list.
n_simulate: int
Number of bootstrap replications to use on each run.
kwargs:
Kwargs to pass to parallel_apply.
Returns
-------
bs_values_df: pandas data frame
Columns represent estimators and rows represent runs.
Each cell contains a 1d array of bootstrap resampled values for the run
and estimator. |
21,121 | def find_modules_with_decorators(path,decorator_module,decorator_name):
modules_paths = []
if path[-3:] == '.py':
modules_paths.append(path)
else :
modules_paths += find_file_regex(path, r'.*\.py$')
return [module for module in modules_paths if is_module_has_decorated(module,decorator_module,decorator_name)] | Finds all the modules decorated with the specified decorator in the path, file or module specified.
Args :
path : All modules in the directory and its sub-directories will be scanned.
decorator_module : The full name of the module defining the decorator.
decorator_name : The name of the decorator. |
21,122 | def cumulative_distance(lat, lon,dist_int=None):
rt = 6378.137
nelts=lon.size
lon_a = np.deg2rad(lon[0:nelts - 1])
lon_b = np.deg2rad(lon[1:nelts])
lat_a = np.deg2rad(lat[0:nelts - 1])
lat_b = np.deg2rad(lat[1:nelts])
interm = np.cos(lat_a) * np.cos(lat_b) * np.cos(lon_a - lon_b) + np.sin(lat_a) * np.sin(lat_b)
dist_int=np.append(0,rt*np.arccos(interm))
return dist_int.cumsum() | ;+
; CUMULATIVE_DISTANCE : computes the distance along a line.
;
; @Author : Renaud DUSSURGET, LEGOS/CTOH
; @History :
; - Feb. 2009 : First release (adapted from calcul_distance)
;- |
21,123 | def receive_message(self, message, data):
if data['type'] == TYPE_RESPONSE_STATUS:
self.is_launched = True
return True | Currently not doing anything with received messages. |
21,124 | def show_proportions(adata):
layers_keys = [key for key in ['spliced', 'unspliced', 'ambiguous'] if key in adata.layers.keys()]
tot_mol_cell_layers = [adata.layers[key].sum(1) for key in layers_keys]
mean_abundances = np.round(
[np.mean(tot_mol_cell / np.sum(tot_mol_cell_layers, 0)) for tot_mol_cell in tot_mol_cell_layers], 2)
print('Abundance of ' + str(layers_keys) + ': ' + str(mean_abundances)) | Fraction of spliced/unspliced/ambiguous abundances
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
Returns
-------
Prints the fractions of abundances. |
21,125 | def _lexical_chains(self, doc, term_concept_map):
concepts = list({c for c in term_concept_map.values()})
n_cons = len(concepts)
adj_mat = np.zeros((n_cons, n_cons))
for i, c in enumerate(concepts):
for j, c_ in enumerate(concepts):
edge = 0
if c == c_:
edge = 1
elif c_ in c._shortest_hypernym_paths(simulate_root=False).keys():
edge = 2
elif c in c_._shortest_hypernym_paths(simulate_root=False).keys():
edge = 2
elif c_ in c.member_meronyms() + c.part_meronyms() + c.substance_meronyms():
edge = 3
elif c in c_.member_meronyms() + c_.part_meronyms() + c_.substance_meronyms():
edge = 3
adj_mat[i,j] = edge
concept_labels = connected_components(adj_mat, directed=False)[1]
lexical_chains = [([], []) for i in range(max(concept_labels) + 1)]
for i, concept in enumerate(concepts):
label = concept_labels[i]
lexical_chains[label][0].append(concept)
lexical_chains[label][1].append(i)
return [(chain, adj_mat[indices][:,indices]) for chain, indices in lexical_chains] | Builds lexical chains, as an adjacency matrix,
using a disambiguated term-concept map. |
21,126 | def charge(self):
if self._reader._level == 3:
for ns in (FBC_V2, FBC_V1):
charge = self._root.get(_tag('charge', ns))
if charge is not None:
return self._parse_charge_string(charge)
else:
charge = self._root.get('charge')
if charge is not None:
return self._parse_charge_string(charge)
return None | Species charge |
21,127 | def _mom(self, kloc, cache, **kwargs):
if evaluation.get_dependencies(*list(self.inverse_map)):
raise StochasticallyDependentError(
"Joint distribution with dependencies not supported.")
output = 1.
for dist in evaluation.sorted_dependencies(self):
if dist not in self.inverse_map:
continue
idx = self.inverse_map[dist]
kloc_ = kloc[idx].reshape(1)
output *= evaluation.evaluate_moment(dist, kloc_, cache=cache)
return output | Example:
>>> dist = chaospy.J(chaospy.Uniform(), chaospy.Normal())
>>> print(numpy.around(dist.mom([[0, 0, 1], [0, 1, 1]]), 4))
[1. 0. 0.]
>>> d0 = chaospy.Uniform()
>>> dist = chaospy.J(d0, d0+chaospy.Uniform())
>>> print(numpy.around(dist.mom([1, 1]), 4))
0.5833 |
21,128 | def parse(self,DXfield):
self.DXfield = DXfield
self.currentobject = None
self.objects = []
self.tokens = []
with open(self.filename, 'r') as self.dxfile:
self.use_parser()
for o in self.objects:
if o.type == 'field':
DXfield.id = o.id
continue
c = o.initialize()
self.DXfield.add(c.component,c)
del self.currentobject, self.objects | Parse the dx file and construct a DX field object with component classes.
A :class:`field` instance *DXfield* must be provided to be
filled by the parser::
DXfield_object = OpenDX.field(*args)
parse(DXfield_object)
A tokenizer turns the dx file into a stream of tokens. A
hierarchy of parsers examines the stream. The level-0 parser
('general') distinguishes comments and objects (level-1). The
object parser calls level-3 parsers depending on the object
found. The basic idea is that of a 'state machine'. There is
one parser active at any time. The main loop is the general
parser.
* Constructing the dx objects with classtype and classid is
not implemented yet.
* Unknown tokens raise an exception. |
21,129 | def custom_to_pmrapmdec(pmphi1,pmphi2,phi1,phi2,T=None,degree=False):
if T is None: raise ValueError("Must set T= for custom_to_pmrapmdec")
return pmrapmdec_to_custom(pmphi1, pmphi2, phi1, phi2,
T=nu.transpose(T),
degree=degree) | NAME:
custom_to_pmrapmdec
PURPOSE:
rotate proper motions in a custom set of sky coordinates (phi1,phi2) to ICRS (ra,dec)
INPUT:
pmphi1 - proper motion in custom (multplied with cos(phi2)) [mas/yr]
pmphi2 - proper motion in phi2 [mas/yr]
phi1 - custom longitude
phi2 - custom latitude
T= matrix defining the transformation in cartesian coordinates:
new_rect = T dot old_rect
where old_rect = [cos(dec)cos(ra), cos(dec)sin(ra), sin(dec)] and similar for new_rect
degree= (False) if True, phi1 and phi2 are given in degrees (default=False)
OUTPUT:
(pmra x cos(dec), dec) for vector inputs [:,2]
HISTORY:
2019-03-02 - Written - Nathaniel Starkman (UofT) |
21,130 | def coarsen_all_traces(level=2, exponential=False, axes="all", figure=None):
if axes=="gca": axes=_pylab.gca()
if axes=="all":
if not figure: figure = _pylab.gcf()
axes = figure.axes
if not _fun.is_iterable(axes): axes = [axes]
for a in axes:
lines = a.get_lines()
for line in lines:
if isinstance(line, _mpl.lines.Line2D):
coarsen_line(line, level, exponential, draw=False)
_pylab.draw() | This function does nearest-neighbor coarsening of the data. See
spinmob.fun.coarsen_data for more information.
Parameters
----------
level=2
How strongly to coarsen.
exponential=False
If True, use the exponential method (great for log-x plots).
axes="all"
Which axes to coarsen.
figure=None
Which figure to use. |
21,131 | def set_grade(
self,
assignment_id,
student_id,
grade_value,
gradebook_id='',
**kwargs
):
grade_info = {
'studentId': student_id,
'assignmentId': assignment_id,
'mode': 2,
'comment': 'from MITx {0}'.format(time.ctime(time.time())),
'numericGradeValue': str(grade_value),
'isGradeApproved': False
}
grade_info.update(kwargs)
log.info(
"student %s set_grade=%s for assignment %s",
student_id,
grade_value,
assignment_id)
return self.post(
'grades/{gradebookId}'.format(
gradebookId=gradebook_id or self.gradebook_id
),
data=grade_info,
) | Set numerical grade for student and assignment.
Set a numerical grade for for a student and assignment. Additional
options
for grade ``mode`` are: OVERALL_GRADE = ``1``, REGULAR_GRADE = ``2``
To set 'excused' as the grade, enter ``None`` for letter and
numeric grade values,
and pass ``x`` as the ``specialGradeValue``.
``ReturnAffectedValues`` flag determines whether or not to return
student cumulative points and
impacted assignment category grades (average and student grade).
Args:
assignment_id (str): numerical ID for assignment
student_id (str): numerical ID for student
grade_value (str): numerical grade value
gradebook_id (str): unique identifier for gradebook, i.e. ``2314``
kwargs (dict): dictionary of additional parameters
.. code-block:: python
{
u'letterGradeValue':None,
u'booleanGradeValue':None,
u'specialGradeValue':None,
u'mode':2,
u'isGradeApproved':False,
u'comment':None,
u'returnAffectedValues': True,
}
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
dict: dictionary containing response ``status`` and ``message``
.. code-block:: python
{
u'message': u'grade saved successfully',
u'status': 1
} |
21,132 | def CopyToDateTimeString(self):
if (self._timestamp is None or self._timestamp < self._INT64_MIN or
self._timestamp > self._INT64_MAX):
return None
return super(APFSTime, self)._CopyToDateTimeString() | Copies the APFS timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#########" or
None if the timestamp is missing or invalid. |
21,133 | def chunk(self, regex):
chunks = []
for component in self._content:
chunks.extend(
StringComponent(component.placeholder, s) for s in
regex.split(str(component)))
for i, (chunk1, chunk2) in enumerate(
zip(chunks, islice(chunks, 1, None))):
if chunk1.placeholder is not chunk2.placeholder:
chunks[i:i + 2] = [self.__class__((chunk1, chunk2))]
return chunks | FIXME: |
21,134 | def switch_to_app(self, package):
log.debug("switching to app ...".format(package))
cmd, url = DEVICE_URLS["switch_to_app"]
widget_id = self._get_widget_id(package)
url = url.format(, package, widget_id)
self.result = self._exec(cmd, url) | activates an app that is specified by package. Selects the first
app it finds in the app list
:param package: name of package/app
:type package: str
:return: None
:rtype: None |
21,135 | def list_js_files(dir):
for dirpath, dirnames, filenames in os.walk(dir):
for filename in filenames:
if is_js_file(filename):
yield os.path.join(dirpath, filename) | Generator for all JavaScript files in the directory, recursively
>>> 'examples/module.js' in list(list_js_files('examples'))
True |
21,136 | def merge_dictionaries(a, b):
res = {}
for k in a:
res[k] = a[k]
for k in b:
res[k] = b[k]
return res | Merge two dictionaries; duplicate keys get value from b. |
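A quick check of the documented duplicate-key behaviour (values from `b` win):

```python
a = {'x': 1, 'y': 2}
b = {'y': 20, 'z': 30}
assert merge_dictionaries(a, b) == {'x': 1, 'y': 20, 'z': 30}
```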
21,137 | def normalized(self):
qr = self.qr /1./ np.linalg.norm(self.qr)
return DualQuaternion(qr, self.qd, True) | :obj:`DualQuaternion`: This quaternion with qr normalized. |
21,138 | def from_directory(cls, directory):
cert_path = os.path.join(directory, 'skein.crt')
key_path = os.path.join(directory, 'skein.pem')
for path, name in [(cert_path, 'certificate'), (key_path, 'key')]:
if not os.path.exists(path):
raise context.FileNotFoundError(
"Security %s file not found at %r" % (name, path)
)
return Security(cert_file=cert_path, key_file=key_path) | Create a security object from a directory.
Relies on standard names for each file (``skein.crt`` and
``skein.pem``). |
21,139 | def find_raw_devices(vendor=None, product=None, serial_number=None,
custom_match=None, **kwargs):
def is_usbraw(dev):
if custom_match and not custom_match(dev):
return False
return bool(find_interfaces(dev, bInterfaceClass=0xFF,
bInterfaceSubClass=0xFF))
return find_devices(vendor, product, serial_number, is_usbraw, **kwargs) | Find connected USB RAW devices. See usbutil.find_devices for more info. |
21,140 | def log(self, n=None, template=None):
cmd = ['bzr', 'log']
if n:
cmd.append('-l%d' % n)
return self.sh(cmd, shell=False) | Run the repository log command
Returns:
str: output of log command (``bzr log -l <n>``) |
21,141 | def readSTATION0(path, stations):
stalist = []
f = open(path + '/STATION0.HYP', 'r')
for line in f:
if line[1:6].strip() in stations:
station = line[1:6].strip()
lat = line[6:14]
if lat[-1] == 'S':
NS = -1
else:
NS = 1
if lat[4] == '.':
lat = (int(lat[0:2]) + float(lat[2:-1]) / 60) * NS
else:
lat = (int(lat[0:2]) + float(lat[2:4] + '.' + lat[4:-1]) /
60) * NS
lon = line[14:23]
if lon[-1] == 'W':
EW = -1
else:
EW = 1
if lon[5] == '.':
lon = (int(lon[0:3]) + float(lon[3:-1]) / 60) * EW
else:
lon = (int(lon[0:3]) + float(lon[3:5] + '.' + lon[5:-1]) /
60) * EW
elev = float(line[23:-1].strip())
if line[0] == :
elev *= -1
stalist.append((station, lat, lon, elev))
f.close()
f = open('station.dat', 'w')
for sta in stalist:
line = ''.join([sta[0].ljust(5), _cc_round(sta[1], 4).ljust(10),
_cc_round(sta[2], 4).ljust(10),
_cc_round(sta[3] / 1000, 4).rjust(7), '\n'])
f.write(line)
f.close()
return stalist | Read a Seisan STATION0.HYP file on the path given.
Outputs the information, and writes to station.dat file.
:type path: str
:param path: Path to the STATION0.HYP file
:type stations: list
:param stations: Stations to look for
:returns: List of tuples of station, lat, long, elevation
:rtype: list
>>> # Get the path to the test data
>>> import eqcorrscan
>>> import os
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> readSTATION0(TEST_PATH, ['WHFS', 'WHAT2', 'BOB'])
[('WHFS', -43.261, 170.359, 60.0), ('WHAT2', -43.2793, \
170.36038333333335, 95.0), ('BOB', 41.408166666666666, \
-174.87116666666665, 101.0)] |
21,142 | def sg_int(tensor, opt):
return tf.cast(tensor, tf.sg_intx, name=opt.name) | r"""Casts a tensor to intx.
See `tf.cast()` in tensorflow.
Args:
tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
opt:
name: If provided, it replaces current tensor's name.
Returns:
A `Tensor` or `SparseTensor` with same shape as `tensor`. |
21,143 | def write_squonk_datasetmetadata(outputBase, thinOutput, valueClassMappings, datasetMetaProps, fieldMetaProps):
meta = {}
props = {}
if datasetMetaProps:
props.update(datasetMetaProps)
if fieldMetaProps:
meta["fieldMetaProps"] = fieldMetaProps
if len(props) > 0:
meta["properties"] = props
if valueClassMappings:
meta["valueClassMappings"] = valueClassMappings
if thinOutput:
meta['type'] = 'org.squonk.types.BasicObject'
else:
meta['type'] = 'org.squonk.types.MoleculeObject'
s = json.dumps(meta)
meta = open(outputBase + '.metadata', 'w')
meta.write(s)
meta.close() | This is a temp hack to write the minimal metadata that Squonk needs.
Will needs to be replaced with something that allows something more complete to be written.
:param outputBase: Base name for the file to write to
:param thinOutput: Write only new data, not structures. Result type will be BasicObject
:param valueClasses: A dict that describes the Java class of the value properties (used by Squonk)
:param datasetMetaProps: A dict with metadata properties that describe the datset as a whole.
The keys used for these metadata are up to the user, but common ones include source, description, created, history.
:param fieldMetaProps: A list of dicts with the additional field metadata. Each dict has a key named fieldName whose value
is the name of the field being described, and a key name values wholes values is a map of metadata properties.
The keys used for these metadata are up to the user, but common ones include source, description, created, history. |
21,144 | def convert(credentials):
credentials_class = type(credentials)
try:
return _CLASS_CONVERSION_MAP[credentials_class](credentials)
except KeyError as caught_exc:
new_exc = ValueError(_CONVERT_ERROR_TMPL.format(credentials_class))
six.raise_from(new_exc, caught_exc) | Convert oauth2client credentials to google-auth credentials.
This class converts:
- :class:`oauth2client.client.OAuth2Credentials` to
:class:`google.oauth2.credentials.Credentials`.
- :class:`oauth2client.client.GoogleCredentials` to
:class:`google.oauth2.credentials.Credentials`.
- :class:`oauth2client.service_account.ServiceAccountCredentials` to
:class:`google.oauth2.service_account.Credentials`.
- :class:`oauth2client.service_account._JWTAccessCredentials` to
:class:`google.oauth2.service_account.Credentials`.
- :class:`oauth2client.contrib.gce.AppAssertionCredentials` to
:class:`google.auth.compute_engine.Credentials`.
- :class:`oauth2client.contrib.appengine.AppAssertionCredentials` to
:class:`google.auth.app_engine.Credentials`.
Returns:
google.auth.credentials.Credentials: The converted credentials.
Raises:
ValueError: If the credentials could not be converted. |
21,145 | def split_into(iterable, sizes):
it = iter(iterable)
for size in sizes:
if size is None:
yield list(it)
return
else:
yield list(islice(it, size)) | Yield a list of sequential items from *iterable* of length 'n' for each
integer 'n' in *sizes*.
>>> list(split_into([1,2,3,4,5,6], [1,2,3]))
[[1], [2, 3], [4, 5, 6]]
If the sum of *sizes* is smaller than the length of *iterable*, then the
remaining items of *iterable* will not be returned.
>>> list(split_into([1,2,3,4,5,6], [2,3]))
[[1, 2], [3, 4, 5]]
If the sum of *sizes* is larger than the length of *iterable*, fewer items
will be returned in the iteration that overruns *iterable* and further
lists will be empty:
>>> list(split_into([1,2,3,4], [1,2,3,4]))
[[1], [2, 3], [4], []]
When a ``None`` object is encountered in *sizes*, the returned list will
contain items up to the end of *iterable* the same way that itertools.slice
does:
>>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
[[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
:func:`split_into` can be useful for grouping a series of items where the
sizes of the groups are not uniform. An example would be where in a row
from a table, multiple columns represent elements of the same feature
(e.g. a point represented by x,y,z) but, the format is not the same for
all columns. |
21,146 | def ls(args):
table = []
for bucket in filter_collection(resources.s3.buckets, args):
bucket.LocationConstraint = clients.s3.get_bucket_location(Bucket=bucket.name)["LocationConstraint"]
cloudwatch = resources.cloudwatch
bucket_region = bucket.LocationConstraint or "us-east-1"
if bucket_region != cloudwatch.meta.client.meta.region_name:
cloudwatch = boto3.Session(region_name=bucket_region).resource("cloudwatch")
data = get_cloudwatch_metric_stats("AWS/S3", "NumberOfObjects",
start_time=datetime.utcnow() - timedelta(days=2),
end_time=datetime.utcnow(), period=3600, BucketName=bucket.name,
StorageType="AllStorageTypes", resource=cloudwatch)
bucket.NumberOfObjects = int(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None
data = get_cloudwatch_metric_stats("AWS/S3", "BucketSizeBytes",
start_time=datetime.utcnow() - timedelta(days=2),
end_time=datetime.utcnow(), period=3600, BucketName=bucket.name,
StorageType="StandardStorage", resource=cloudwatch)
bucket.BucketSizeBytes = format_number(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None
table.append(bucket)
page_output(tabulate(table, args)) | List S3 buckets. See also "aws s3 ls". Use "aws s3 ls NAME" to list bucket contents. |
21,147 | def write_peps(self, peps, reverse_seqs):
if reverse_seqs:
peps = [(x[0][::-1],) for x in peps]
cursor = self.get_cursor()
cursor.executemany(
, peps)
self.conn.commit() | Writes peps to db. We can reverse to be able to look up
peptides that have some amino acids missing at the N-terminal.
This way we can still use the index. |
21,148 | def merge(self, data, clean=False, validate=False):
try:
model = self.__class__(data)
except ConversionError as errors:
abort(self.to_exceptions(errors.messages))
for key, val in model.to_native().items():
if key in data:
setattr(self, key, val)
if validate:
try:
self.validate()
except ModelValidationError as errors:
abort(self.to_exceptions(errors.messages))
if clean:
self._original = self.to_native() | Merge a dict with the model
This is needed because schematics doesn't auto cast
values when assigned. This method allows us to ensure
incoming data & existing data on a model are always
coerced properly.
We create a temporary model instance with just the new
data so all the features of schematics deserialization
are still available.
:param data:
dict of potentially new different data to merge
:param clean:
set the dirty bit back to clean. This is useful
when the merge is coming from the store where
the data could have been mutated & the new merged
in data is now the single source of truth.
:param validate:
run the schematics validate method
:return:
nothing.. it has mutation side effects |
21,149 | def image_list(auth=None, **kwargs):
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.list_images(**kwargs) | List images
CLI Example:
.. code-block:: bash
salt '*' glanceng.image_list
salt '*' glanceng.image_list |
21,150 | def _init(self):
for goid in self.godag.go_sources:
goobj = self.godag.go2obj[goid]
self.godag.go2obj[goid] = goobj
if self.traverse_parent and goid not in self.seen_cids:
self._traverse_parent_objs(goobj)
if self.traverse_child and goid not in self.seen_pids:
self._traverse_child_objs(goobj) | Given GO ids and GOTerm objects, create mini GO dag. |
21,151 | def f1_score(y_true, y_pred, average='micro', suffix=False):
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
nb_true = len(true_entities)
p = nb_correct / nb_pred if nb_pred > 0 else 0
r = nb_correct / nb_true if nb_true > 0 else 0
score = 2 * p * r / (p + r) if p + r > 0 else 0
return score | Compute the F1 score.
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import f1_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> f1_score(y_true, y_pred)
0.50 |
21,152 | def _parse_memory_embedded_health(self, data):
memory_mb = 0
memory = self._get_memory_details_value_based_on_model(data)
if memory is None:
msg = "Unable to get memory data. Error: Data missing"
raise exception.IloError(msg)
total_memory_size = 0
for memory_item in memory:
memsize = memory_item[self.MEMORY_SIZE_TAG]["VALUE"]
if memsize != self.MEMORY_SIZE_NOT_PRESENT_TAG:
memory_bytes = (
strutils.string_to_bytes(
memsize.replace(' ', ''), return_int=True))
memory_mb = int(memory_bytes / (1024 * 1024))
total_memory_size = total_memory_size + memory_mb
return total_memory_size | Parse the get_host_health_data() for essential properties
:param data: the output returned by get_host_health_data()
:returns: memory size in MB.
:raises IloError, if unable to get the memory details. |
21,153 | def is_ipv6_ok(soft_fail=False):
if os.path.isdir('/proc/sys/net/ipv6'):
if not is_module_loaded('ip6_tables'):
try:
modprobe('ip6_tables')
return True
except subprocess.CalledProcessError as ex:
hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, level="WARN")
if soft_fail:
return False | Check if IPv6 support is present and ip6tables functional
:param soft_fail: If set to True and IPv6 support is broken, then reports
that the host doesn't have IPv6 support, otherwise a
UFWIPv6Error exception is raised.
:returns: True if IPv6 is working, False otherwise |
21,154 | def rowsBeforeValue(self, value, count):
if value is None:
query = self.inequalityQuery(None, count, False)
else:
pyvalue = self._toComparableValue(value)
currentSortAttribute = self.currentSortColumn.sortAttribute()
query = self.inequalityQuery(
currentSortAttribute < pyvalue, count, False)
return self.constructRows(query)[::-1] | Retrieve display data for rows with sort-column values less than the
given value.
@type value: Some type compatible with the current sort column.
@param value: Starting value in the index for the current sort column
at which to start returning results. Rows with a column value for the
current sort column which is less than this value will be returned.
@type count: C{int}
@param count: The number of rows to return.
@return: A list of row data, ordered by the current sort column, ending
at C{value} and containing at most C{count} elements. |
21,155 | def set_wrappable_term(self, v, term):
import textwrap
for t in self['Root'].find(term):
self.remove_term(t)
for l in textwrap.wrap(v, 80):
self['Root'].new_term(term, l) | Set the Root.Description, possibly splitting long descriptions across multiple terms.
21,156 | def _arburg2(X, order):
x = np.array(X)
N = len(x)
if order <= 0.:
raise ValueError("order must be > 0")
rho = sum(abs(x)**2.) / N
den = rho * 2. * N
ef = np.zeros(N, dtype=complex)
eb = np.zeros(N, dtype=complex)
for j in range(0, N):
ef[j] = x[j]
eb[j] = x[j]
a = np.zeros(1, dtype=complex)
a[0] = 1
ref = np.zeros(order, dtype=complex)
temp = 1.
E = np.zeros(order+1)
E[0] = rho
for m in range(0, order):
efp = ef[1:]
ebp = eb[0:-1]
num = -2.* np.dot(ebp.conj().transpose(), efp)
den = np.dot(efp.conj().transpose(), efp)
den += np.dot(ebp, ebp.conj().transpose())
ref[m] = num / den
ef = efp + ref[m] * ebp
eb = ebp + ref[m].conj().transpose() * efp
a.resize(len(a)+1)
a = a + ref[m] * np.flipud(a).conjugate()
E[m+1] = (1 - ref[m].conj().transpose()*ref[m]) * E[m]
return a, E[-1], ref | This version is 10 times faster than arburg, but the output rho is not correct.
returns [1 a0,a1, an-1] |
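A minimal sketch of driving `_arburg2` above with a synthetic AR(2) signal; the coefficient estimates should land near the true values, while (as the docstring warns) the returned noise variance is not reliable:

```python
import numpy as np

rng = np.random.default_rng(0)
n = 2048
x = np.zeros(n)
e = rng.standard_normal(n)
for t in range(2, n):
    # AR(2) process: x[t] = 1.5*x[t-1] - 0.7*x[t-2] + noise
    x[t] = 1.5 * x[t - 1] - 0.7 * x[t - 2] + e[t]

a, rho, ref = _arburg2(x, 2)
# With the [1, a1, a2] convention used here, expect a1 close to -1.5 and a2 close to +0.7.
print(np.round(a.real, 3), np.round(ref.real, 3))
```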
21,157 | def join_state_collections( collection_a, collection_b):
return StateCollection(
(collection_a.states + collection_b.states),
{ grouping_name:_combined_grouping_values(grouping_name, collection_a,collection_b)
for grouping_name in set( list(collection_a.groupings.keys()) + list(collection_b.groupings.keys()) ) }) | Warning: This is a very naive join. Only use it when measures and groups will remain entirely within each subcollection.
For example: if each collection has states grouped by date and both include the same date, then the new collection
would have both of those groups, likely causing problems for group measures and potentially breaking many things. |
21,158 | def RAMON(typ):
if six.PY2:
lookup = [str, unicode]
elif six.PY3:
lookup = [str]
if type(typ) is int:
return _ramon_types[typ]
elif type(typ) in lookup:
return _ramon_types[_types[typ]] | Takes str or int, returns class type |
21,159 | def search_path():
operating_system = get_os()
return [os.path.expanduser("~/.kerncraft/iaca/{}/".format(operating_system)),
os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/iaca/{}/'.format(
operating_system)] | Return potential locations of IACA installation. |
21,160 | def subscribe_topic(self, topics=[], pattern=None):
if not isinstance(topics, list):
topics = [topics]
self.consumer.subscribe(topics, pattern=pattern) | Subscribe to a list of topics, or a topic regex pattern.
- ``topics`` (list): List of topics for subscription.
- ``pattern`` (str): Pattern to match available topics. You must provide either topics or pattern,
but not both. |
21,161 | def dispatch(self, receiver):
super(SessionCallbackAdded, self).dispatch(receiver)
if hasattr(receiver, '_session_callback_added'):
receiver._session_callback_added(self) | Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_added`` if
it exists. |
21,162 | def get_all_scores(self, motifs, dbmotifs, match, metric, combine,
pval=False, parallel=True, trim=None, ncpus=None):
if trim:
for m in motifs:
m.trim(trim)
for m in dbmotifs:
m.trim(trim)
scores = {}
if parallel:
if ncpus is None:
ncpus = int(MotifConfig().get_default_params()["ncpus"])
pool = Pool(processes=ncpus, maxtasksperchild=1000)
batch_len = len(dbmotifs) // ncpus
if batch_len <= 0:
batch_len = 1
jobs = []
for i in range(0, len(dbmotifs), batch_len):
p = pool.apply_async(_get_all_scores,
args=(self, motifs, dbmotifs[i: i + batch_len], match, metric, combine, pval))
jobs.append(p)
pool.close()
for job in jobs:
result = job.get()
for m1,v in result.items():
for m2, s in v.items():
if m1 not in scores:
scores[m1] = {}
scores[m1][m2] = s
pool.join()
else:
scores = _get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval)
return scores | Pairwise comparison of a set of motifs compared to reference motifs.
Parameters
----------
motifs : list
List of Motif instances.
dbmotifs : list
List of Motif instances.
match : str
Match can be "partial", "subtotal" or "total". Not all metrics use
this.
metric : str
Distance metric.
combine : str
Combine positional scores using "mean" or "sum". Not all metrics
use this.
pval : bool , optional
Calculate p-vale of match.
parallel : bool , optional
Use multiprocessing for parallel execution. True by default.
trim : float or None
If a float value is specified, motifs are trimmed used this IC
cutoff before comparison.
ncpus : int or None
Specifies the number of cores to use for parallel execution.
Returns
-------
scores : dict
Dictionary with scores. |
21,163 | def xml_findall(xpath):
def xpath_findall(value):
validate(ET.iselement, value)
return value.findall(xpath)
return transform(xpath_findall) | Find a list of XML elements via xpath. |
21,164 | def data(self):
header = struct.pack('>BLB',
4,
self.created,
self.algo_id)
oid = util.prefix_len('>B', self.curve_info['oid'])
blob = self.curve_info['serialize'](self.verifying_key)
return header + oid + blob + self.ecdh_packet | Data for packet creation. |
21,165 | def add_log_type(name, display, color, bcolor):
global MESSAGE_LOG
v_name = name.replace(" ", "_").upper()
val = 0
lkey = MESSAGE_LOG.keys()
while val in lkey:
val += 1
MESSAGE_LOG[val] = [v_name, (display, color, bcolor,)]
setattr(LOG, v_name, val) | name : call name (A-Z and '_')
display : display message in [-]
color : text color (see bashutils.colors)
bcolor : background color (see bashutils.colors) |
21,166 | def do_macro_arg(parser, token):
parser.delete_first_token()
return MacroArgNode(nodelist) | Function taking a parsed template tag
to a MacroArgNode. |
21,167 | def has_abiext(self, ext, single_file=True):
if ext != "abo":
ext = ext if ext.startswith('_') else '_' + ext
files = []
for f in self.list_filepaths():
if ext == "_DDB" and f.endswith(".nc"): continue
if ext == "_MDF" and not f.endswith(".nc"): continue
if ext == "_DDK" and f.endswith(".nc"): continue
if f.endswith(ext) or f.endswith(ext + ".nc"):
files.append(f)
if not files:
files = [f for f in self.list_filepaths() if fnmatch(f, "*%s*" % ext)]
if not files:
return ""
if len(files) > 1 and single_file:
raise ValueError("Found multiple files with the same extensions:\n %s\n" % files +
"Please avoid using multiple datasets!")
return files[0] if single_file else files | Returns the absolute path of the ABINIT file with extension ext.
Support both Fortran files and netcdf files. In the later case,
we check whether a file with extension ext + ".nc" is present
in the directory. Returns empty string is file is not present.
Raises:
`ValueError` if multiple files with the given ext are found.
This implies that this method is not compatible with multiple datasets. |
21,168 | def largest_graph(mol):
mol.require("Valence")
mol.require("Topology")
m = clone(mol)
if m.isolated:
for k in itertools.chain.from_iterable(m.isolated):
m.remove_atom(k)
return m | Return a molecule which has largest graph in the compound
Passing single molecule object will results as same as molutil.clone |
21,169 | def _pushMessages(self):
self.showStatus()
if len(self._statusMsgsToShow) > 0:
self.top.after(200, self._pushMessages) | Internal callback used to make sure the msg list keeps moving. |
21,170 | def copy(self):
if self.select_line_on_copy_empty and not self.textCursor().hasSelection():
TextHelper(self).select_whole_line()
super(CodeEdit, self).copy() | Copy the selected text to the clipboard. If no text was selected, the
entire line is copied (this feature can be turned off by
setting :attr:`select_line_on_copy_empty` to False. |
21,171 | def _compute_ymean(self, **kwargs):
y = np.asarray(kwargs.get('y', self.y))
dy = np.asarray(kwargs.get('dy', self.dy))
if dy.size == 1:
return np.mean(y)
else:
return np.average(y, weights=1 / dy ** 2) | Compute the (weighted) mean of the y data |
21,172 | async def processClaims(self, allClaims: Dict[ID, Claims]):
res = []
for schemaId, (claim_signature, claim_attributes) in allClaims.items():
res.append(await self.processClaim(schemaId, claim_attributes, claim_signature))
return res | Processes and saves received Claims.
:param claims: claims to be processed and saved for each claim
definition. |
21,173 | def initiate_close(self):
self._running = False
self._accumulator.close()
self.wakeup() | Start closing the sender (won't complete until all data is sent). |
21,174 | def list_ctx(self):
if self._data is None:
if self._deferred_init:
return self._deferred_init[1]
raise RuntimeError("Parameter has not been initialized"%self.name)
return self._ctx_list | Returns a list of contexts this parameter is initialized on. |
21,175 | def create_user(self, user_name, path='/'):
params = {'UserName': user_name,
'Path': path}
return self.get_response('CreateUser', params) | Create a user.
:type user_name: string
:param user_name: The name of the new user
:type path: string
:param path: The path in which the user will be created.
Defaults to /. |
21,176 | def _render(self, template, context, is_file, at_paths=None,
at_encoding=ENCODING, **kwargs):
eopts = self.filter_options(kwargs, self.engine_valid_options())
self._env_options.update(eopts)
loader = FileSystemExLoader(at_paths, encoding=at_encoding.lower(),
enable_glob=True)
env = jinja2.Environment(loader=loader, **self._env_options)
if kwargs:
context.update(kwargs)
try:
tmpl = (env.get_template if is_file else env.from_string)(template)
return tmpl.render(**context)
except jinja2.exceptions.TemplateNotFound as exc:
raise TemplateNotFound(str(exc)) | Render given template string and return the result.
:param template: Template content
:param context: A dict or dict-like object to instantiate given
template file
:param is_file: True if given `template` is a filename
:param at_paths: Template search paths
:param at_encoding: Template encoding
:param kwargs: Keyword arguments passed to jinja2.Envrionment. Please
note that 'loader' option is not supported because anytemplate does
not support to load template except for files
:return: Rendered string |
21,177 | def get_all_instances(self, instance_ids=None, filters=None):
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, )
if filters:
if in filters:
gid = filters.get()
if not gid.startswith() or len(gid) != 11:
warnings.warn(
"The group-id filter now requires a security group "
"identifier (sg-*) instead of a group name. To filter "
"by group name use the filter instead.",
UserWarning)
self.build_filter_params(params, filters)
return self.get_list('DescribeInstances', params,
[('item', Reservation)], verb='POST') | Retrieve all the instances associated with your account.
:type instance_ids: list
:param instance_ids: A list of strings of instance IDs
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.instance.Reservation` |
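A hedged usage sketch for the call above, assuming the classic boto 2 EC2 connection API (the region and filter values are illustrative, and real AWS credentials are needed to run it):

import boto.ec2

conn = boto.ec2.connect_to_region("us-east-1")          # illustrative region
# Each Reservation groups the instances launched by a single request.
reservations = conn.get_all_instances(filters={"instance-state-name": "running"})
for reservation in reservations:
    for instance in reservation.instances:
        print(instance.id, instance.state)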
21,178 | def predict(self, x):
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
if self.numClasses == 2:
margin = self.weights.dot(x) + self._intercept
if margin > 0:
prob = 1 / (1 + exp(-margin))
else:
exp_margin = exp(margin)
prob = exp_margin / (1 + exp_margin)
if self._threshold is None:
return prob
else:
return 1 if prob > self._threshold else 0
else:
best_class = 0
max_margin = 0.0
if x.size + 1 == self._dataWithBiasSize:
for i in range(0, self._numClasses - 1):
margin = x.dot(self._weightsMatrix[i][0:x.size]) + \
self._weightsMatrix[i][x.size]
if margin > max_margin:
max_margin = margin
best_class = i + 1
else:
for i in range(0, self._numClasses - 1):
margin = x.dot(self._weightsMatrix[i])
if margin > max_margin:
max_margin = margin
best_class = i + 1
return best_class | Predict values for a single data point or an RDD of points
using the model trained. |
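A small numeric illustration of the binary branch above, done with plain NumPy and made-up weights rather than the original model object:

import numpy as np

weights = np.array([0.5, -0.25])     # hypothetical trained weights
intercept = 0.1                      # hypothetical intercept
x = np.array([2.0, 4.0])

margin = weights.dot(x) + intercept          # 0.5*2 - 0.25*4 + 0.1 = 0.1
prob = 1.0 / (1.0 + np.exp(-margin))         # sigmoid of the margin, ~0.525
print(1 if prob > 0.5 else 0)                # thresholded label -> 1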
21,179 | def load(filename):
path, name = os.path.split(filename)
path = path or '.'
with util.indir(path):
return pickle.load(open(name, 'rb')) | Load the state from the given file, moving to the file's directory during
load (temporarily, moving back after loaded)
Parameters
----------
filename : string
name of the file to open, should be a .pkl file |
21,180 | def get_form(self, request, obj=None, **kwargs):
template = get_template_from_request(request, obj)
form = make_form(self.model, get_placeholders(template))
language = get_language_from_request(request)
form.base_fields['language'].initial = language
if obj:
initial_slug = obj.slug(language=language, fallback=False)
initial_title = obj.title(language=language, fallback=False)
form.base_fields['slug'].initial = initial_slug
form.base_fields['title'].initial = initial_title
template = get_template_from_request(request, obj)
page_templates = settings.get_page_templates()
template_choices = list(page_templates)
if not [tpl for tpl in template_choices if tpl[0] == settings.PAGE_DEFAULT_TEMPLATE]:
template_choices.insert(0, (settings.PAGE_DEFAULT_TEMPLATE,
_('Default template')))
form.base_fields['template'].choices = template_choices
form.base_fields['template'].initial = force_text(template)
for placeholder in get_placeholders(template):
ctype = placeholder.ctype
if obj:
initial = placeholder.get_content(obj, language, lang_fallback=False)
else:
initial = None
form.base_fields[ctype] = placeholder.get_field(obj,
language, initial=initial)
return form | Get a :class:`Page <pages.admin.forms.PageForm>` for the
:class:`Page <pages.models.Page>` and modify its fields depending on
the request. |
21,181 | def abort_copy_file(self, share_name, directory_name, file_name, copy_id, timeout=None):
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('copy_id', copy_id)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(share_name, directory_name, file_name)
request.query = {
'comp': 'copy',
'copyid': _to_str(copy_id),
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-copy-action': 'abort',
}
self._perform_request(request) | Aborts a pending copy_file operation, and leaves a destination file
with zero length and full metadata.
:param str share_name:
Name of destination share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of destination file.
:param str copy_id:
Copy identifier provided in the copy.id of the original
copy_file operation.
:param int timeout:
The timeout parameter is expressed in seconds. |
21,182 | def reconstructed_pixelization_from_solution_vector(self, solution_vector):
recon = mapping_util.map_unmasked_1d_array_to_2d_array_from_array_1d_and_shape(array_1d=solution_vector,
shape=self.shape)
return scaled_array.ScaledRectangularPixelArray(array=recon, pixel_scales=self.geometry.pixel_scales,
origin=self.geometry.origin) | Given the solution vector of an inversion (see *inversions.Inversion*), determine the reconstructed \
pixelization of the rectangular pixelization by using the mapper. |
21,183 | def handle_exception(exc_info=None, source_hint=None, tb_override=_NO):
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
if _make_traceback is None:
from .runtime.debug import make_traceback as _make_traceback
exc_type, exc_value, tb = exc_info
if tb_override is not _NO:
tb = tb_override
traceback = _make_traceback((exc_type, exc_value, tb), source_hint)
exc_type, exc_value, tb = traceback.standard_exc_info
reraise(exc_type, exc_value, tb) | Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template. |
21,184 | def traverse_layout(root, callback):
callback(root)
if isinstance(root, collections.Iterable):
for child in root:
traverse_layout(child, callback) | Tree walker that invokes the callback as it
traverses the pdf object tree |
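A quick usage sketch, using nested lists as a stand-in for the pdf layout tree (leaves are integers here; note that collections.Iterable requires Python < 3.10, where it has since moved to collections.abc):

collected = []
layout = [1, [2, [3, 4]], 5]              # containers are lists, leaves are ints
traverse_layout(layout, collected.append)
# collected now holds every node, parents before children:
# [[1, [2, [3, 4]], 5], 1, [2, [3, 4]], 2, [3, 4], 3, 4, 5]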
21,185 | def _check_file(self):
if not os.path.exists(self.file_path):
return False
self._migrate()
config = configparser.RawConfigParser()
config.read(self.file_path)
try:
config.get(
escape_for_ini('keyring-setting'),
escape_for_ini('password reference'),
)
except (configparser.NoSectionError, configparser.NoOptionError):
return False
try:
self._check_scheme(config)
except AttributeError:
return True
return self._check_version(config) | Check if the file exists and has the expected password reference. |
21,186 | def deregister(self, reg_data, retry=True, interval=1, timeout=3):
Retry(target=self.publish.direct.delete,
args=("/controller/registration", reg_data,),
kwargs={"timeout": timeout},
options={"retry": retry, "interval": interval})
_logger.debug("Deregister successfully %s tunnel: %s" %
(reg_data["name"],
self._conn.tunnels[reg_data["role"]][0],)) | Deregister model/view of this bundle |
21,187 | def metadata(self):
md = self.xml(src="docProps/core.xml")
if md is None:
md = XML(root=etree.Element("{%(cp)s}metadata" % self.NS))
return md.root | return a cp:metadata element with the metadata in the document |
21,188 | def _add_sub_elements_from_dict(parent, sub_dict):
for key, value in sub_dict.items():
if isinstance(value, list):
for repeated_element in value:
sub_element = ET.SubElement(parent, key)
_add_element_attrs(sub_element, repeated_element.get("attrs", {}))
children = repeated_element.get("children", None)
if isinstance(children, dict):
_add_sub_elements_from_dict(sub_element, children)
elif isinstance(children, str):
sub_element.text = children
else:
sub_element = ET.SubElement(parent, key)
_add_element_attrs(sub_element, value.get("attrs", {}))
children = value.get("children", None)
if isinstance(children, dict):
_add_sub_elements_from_dict(sub_element, children)
elif isinstance(children, str):
sub_element.text = children | Add SubElements to the parent element.
:param parent: ElementTree.Element: The parent element for the newly created SubElement.
:param sub_dict: dict: Used to create a new SubElement. See `dict_to_xml_schema`
method docstring for more information. e.g.:
{"example": {
"attrs": {
"key1": "value1",
...
},
...
}} |
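A self-contained sketch of the call above (standard library only; _add_element_attrs is redefined here as a trivial stand-in because the real helper lives elsewhere in the original module):

import xml.etree.ElementTree as ET

def _add_element_attrs(element, attrs):
    # Minimal stand-in: copy each attribute onto the element.
    for name, value in attrs.items():
        element.set(name, value)

root = ET.Element("root")
_add_sub_elements_from_dict(root, {
    "example": {"attrs": {"key1": "value1"}, "children": "some text"},
})
print(ET.tostring(root))
# b'<root><example key1="value1">some text</example></root>'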
21,189 | def recarrayequalspairs(X,Y,weak=True):
if (weak and set(X.dtype.names) != set(Y.dtype.names)) or \
(not weak and X.dtype.names != Y.dtype.names):
return [np.zeros((len(X),),int),np.zeros((len(X),),int),None]
else:
if X.dtype.names != Y.dtype.names:
Y = np.rec.fromarrays([Y[a] for a in X.dtype.names],
names= X.dtype.names)
NewX = np.array([str(l) for l in X])
NewY = np.array([str(l) for l in Y])
s = NewY.argsort() ; NewY.sort()
[A,B] = equalspairs(NewX,NewY)
return [A,B,s] | Indices of elements in a sorted numpy recarray (or ndarray with
structured dtype) equal to those in another.
Record array version of :func:`tabular.fast.equalspairs`, but slightly
different because the concept of being sorted is less well-defined for a
record array.
Given numpy recarray `X` and sorted numpy recarray `Y`, determine the
indices in Y equal to indices in X.
Returns `[A,B,s]` where `s` is a permutation of `Y` such that for::
Y = X[s]
we have::
Y[A[i]:B[i]] = Y[Y == X[i]]
`A[i] = B[i] = 0` if `X[i]` is not in `Y`.
**Parameters**
**X** : numpy recarray
Numpy recarray to compare to the sorted numpy recarray `Y`.
**Y** : numpy recarray
Sorted numpy recarray. Determine the indices of elements
of `Y` equal to those in numpy array `X`.
**Returns**
**A** : numpy array
List of indices in `Y`, `len(A) = len(Y)`.
**B** : numpy array
List of indices in `Y`, `len(B) = len(Y)`.
**s** : numpy array
Permutation of `Y`.
**See Also:**
:func:`tabular.fast.recarrayequalspairs` |
21,190 | def replica_lag(self, **kwargs):
if not self._use_replica():
return 0
try:
kwargs[] = self.stack_mark(inspect.stack())
sql = "select EXTRACT(EPOCH FROM NOW() - pg_last_xact_replay_timestamp()) AS replication_lag"
return self.collection_instance(
self.db_adapter().raw_query(
sql=sql, **kwargs
)
).squeeze()
except:
return 0 | Returns the current replication lag in seconds between the master and replica databases.
:returns: float |
21,191 | def disk_pick_polar(n=1, rng=None):
if rng is None:
rng = np.random
a = np.zeros([n, 2], dtype=float)
a[:, 0] = np.sqrt(rng.uniform(size=n))
a[:, 1] = rng.uniform(0.0, 2.0 * np.pi, size=n)
return a | Return vectors uniformly picked on the unit disk.
The unit disk is the space enclosed by the unit circle.
Vectors are in a polar representation.
Parameters
----------
n: integer
Number of points to return.
Returns
-------
r: array, shape (n, 2)
Sample vectors. |
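A short usage sketch, converting the polar samples to Cartesian coordinates to check that they fall inside the unit disk:

import numpy as np

r = disk_pick_polar(n=5, rng=np.random.RandomState(0))   # reproducible draw
radii, angles = r[:, 0], r[:, 1]
x, y = radii * np.cos(angles), radii * np.sin(angles)
print(np.all(x ** 2 + y ** 2 <= 1.0))                    # True: every point lies inside the unit circle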
21,192 | def assert_dict_eq(expected, actual, number_tolerance=None, dict_path=[]):
assert_is_instance(expected, dict)
assert_is_instance(actual, dict)
expected_keys = set(expected.keys())
actual_keys = set(actual.keys())
assert expected_keys <= actual_keys, "Actual dict at %s is missing keys: %r" % (
_dict_path_string(dict_path),
expected_keys - actual_keys,
)
assert actual_keys <= expected_keys, "Actual dict at %s has extra keys: %r" % (
_dict_path_string(dict_path),
actual_keys - expected_keys,
)
for k in expected_keys:
key_path = dict_path + [k]
assert_is_instance(
actual[k],
type(expected[k]),
extra="Types dont match for %s" % _dict_path_string(key_path),
)
if isinstance(actual[k], dict):
assert_dict_eq(
expected[k],
actual[k],
number_tolerance=number_tolerance,
dict_path=key_path,
)
elif isinstance(actual[k], _number_types):
assert_eq(
expected[k],
actual[k],
extra="Value doesnt match for %s" % _dict_path_string(key_path),
) | Asserts that two dictionaries are equal, producing a custom message if they are not. |
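A brief usage sketch with illustrative literals; equal dicts pass silently, while any mismatch raises AssertionError naming the offending key path:

expected = {"count": 3, "limits": {"low": 0.0, "high": 1.0}}
actual = {"count": 3, "limits": {"low": 0.0, "high": 1.0}}
assert_dict_eq(expected, actual)           # no output: the nested dicts match
# assert_dict_eq(expected, {"count": 3})   # would fail: the 'limits' key is missing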
21,193 | def _unascii(s):
chunks = []
pos = 0
while m:
start = m.start()
end = m.end()
g = m.group(1)
if g is None:
chunks.append(s[pos:end])
else:
| Unpack `\\uNNNN` escapes in 's' and encode the result as UTF-8
This method takes the output of the JSONEncoder and expands any \\uNNNN
escapes it finds (except for \\u0000 to \\u001F, which are converted to
\\xNN escapes).
For performance, it assumes that the input is valid JSON, and performs few
sanity checks. |
21,194 | def cat(ctx, archive_name, version):
_generate_api(ctx)
var = ctx.obj.api.get_archive(archive_name)
with var.open(, version=version) as f:
for chunk in iter(lambda: f.read(1024 * 1024), ):
click.echo(chunk) | Echo the contents of an archive |
21,195 | def metadata_converter_help_content():
message = m.Message()
paragraph = m.Paragraph(tr(
))
message.add(paragraph)
paragraph = m.Paragraph(tr(
))
message.add(paragraph)
return message | Helper method that returns just the content in extent mode.
This method was added so that the text could be reused in the
wizard.
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message |
21,196 | def dumps(data):
if not isinstance(data, _TOMLDocument) and isinstance(data, dict):
data = item(data)
return data.as_string() | Dumps a TOMLDocument into a string. |
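A quick usage sketch, assuming the tomlkit-style item()/as_string() machinery used above (the output shown in comments is indicative, not exact):

print(dumps({"title": "example", "enabled": True}))
# title = "example"
# enabled = true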
21,197 | def _get_table_data(self):
data = self._simplify_shape(
self.table_widget.get_data())
if self.table_widget.array_btn.isChecked():
return array(data)
elif pd and self.table_widget.df_btn.isChecked():
info = self.table_widget.pd_info
buf = io.StringIO(self.table_widget.pd_text)
return pd.read_csv(buf, **info)
return data | Return clipboard processed as data |
21,198 | def beam_search(self, text:str, n_words:int, no_unk:bool=True, top_k:int=10, beam_sz:int=1000, temperature:float=1.,
sep:str=' ', decoder=decode_spec_tokens):
"Return the `n_words` that come after `text` using beam search."
ds = self.data.single_dl.dataset
self.model.reset()
xb, yb = self.data.one_item(text)
nodes = None
xb = xb.repeat(top_k, 1)
nodes = xb.clone()
scores = xb.new_zeros(1).float()
with torch.no_grad():
for k in progress_bar(range(n_words), leave=False):
out = F.log_softmax(self.model(xb)[0][:,-1], dim=-1)
if no_unk: out[:,self.data.vocab.stoi[UNK]] = -float('inf')
values, indices = out.topk(top_k, dim=-1)
scores = (-values + scores[:,None]).view(-1)
indices_idx = torch.arange(0,nodes.size(0))[:,None].expand(nodes.size(0), top_k).contiguous().view(-1)
sort_idx = scores.argsort()[:beam_sz]
scores = scores[sort_idx]
nodes = torch.cat([nodes[:,None].expand(nodes.size(0),top_k,nodes.size(1)),
indices[:,:,None].expand(nodes.size(0),top_k,1),], dim=2)
nodes = nodes.view(-1, nodes.size(2))[sort_idx]
self.model[0].select_hidden(indices_idx[sort_idx])
xb = nodes[:,-1][:,None]
if temperature != 1.: scores.div_(temperature)
node_idx = torch.multinomial(torch.exp(-scores), 1).item()
return text + sep + sep.join(decoder(self.data.vocab.textify([i.item() for i in nodes[node_idx][1:] ], sep=None))) | Return the `n_words` that come after `text` using beam search. |
21,199 | def set_access_port(self, port_number, vlan_id):
if port_number not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port_number))
nio = self._nios[port_number]
yield from self._hypervisor.send(.format(name=self._name,
nio=nio,
vlan_id=vlan_id))
log.info(.format(name=self._name,
id=self._id,
port=port_number,
vlan_id=vlan_id))
self._mappings[port_number] = ("access", vlan_id) | Sets the specified port as an ACCESS port.
:param port_number: allocated port number
:param vlan_id: VLAN number membership |