Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
388,900 | def update(self, response, **kwargs):
response_cls = self._get_instance(**kwargs)
if response_cls:
setattr(response_cls, self.column, self.accessor(response))
_action_and_commit(response_cls, session.add)
else:
self.get_or_create_from_legacy_response(response, **kwargs) | If a record matching the instance already exists in the database, update
it, else create a new record. |
388,901 | def preferences_view(request):
user = request.user
if request.method == "POST":
logger.debug(dict(request.POST))
phone_formset, email_formset, website_formset, errors = save_personal_info(request, user)
if user.is_student:
preferred_pic_form = save_preferred_pic(request, user)
bus_route_form = save_bus_route(request, user)
else:
preferred_pic_form = None
bus_route_form = None
privacy_options_form = save_privacy_options(request, user)
notification_options_form = save_notification_options(request, user)
for error in errors:
messages.error(request, error)
try:
save_gcm_options(request, user)
except AttributeError:
pass
return redirect("preferences")
else:
phone_formset = PhoneFormset(instance=user, prefix=)
email_formset = EmailFormset(instance=user, prefix=)
website_formset = WebsiteFormset(instance=user, prefix=)
if user.is_student:
preferred_pic = get_preferred_pic(user)
bus_route = get_bus_route(user)
logger.debug(preferred_pic)
preferred_pic_form = PreferredPictureForm(user, initial=preferred_pic)
bus_route_form = BusRouteForm(user, initial=bus_route)
else:
bus_route_form = None
preferred_pic = None
preferred_pic_form = None
privacy_options = get_privacy_options(user)
logger.debug(privacy_options)
privacy_options_form = PrivacyOptionsForm(user, initial=privacy_options)
notification_options = get_notification_options(user)
logger.debug(notification_options)
notification_options_form = NotificationOptionsForm(user, initial=notification_options)
context = {
"phone_formset": phone_formset,
"email_formset": email_formset,
"website_formset": website_formset,
"preferred_pic_form": preferred_pic_form,
"privacy_options_form": privacy_options_form,
"notification_options_form": notification_options_form,
"bus_route_form": bus_route_form if settings.ENABLE_BUS_APP else None
}
return render(request, "preferences/preferences.html", context) | View and process updates to the preferences page. |
388,902 | def split_gtf(gtf, sample_size=None, out_dir=None):
if out_dir:
part1_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part1.gtf"
part2_fn = os.path.basename(os.path.splitext(gtf)[0]) + ".part2.gtf"
part1 = os.path.join(out_dir, part1_fn)
part2 = os.path.join(out_dir, part2_fn)
if file_exists(part1) and file_exists(part2):
return part1, part2
else:
part1 = tempfile.NamedTemporaryFile(delete=False, suffix=".part1.gtf").name
part2 = tempfile.NamedTemporaryFile(delete=False, suffix=".part2.gtf").name
db = get_gtf_db(gtf)
gene_ids = set([x['gene_id'][0] for x in db.all_features()])  # attribute key elided in the source dump; 'gene_id' is the usual GTF attribute here
if not sample_size or (sample_size and sample_size > len(gene_ids)):
sample_size = len(gene_ids)
gene_ids = set(random.sample(gene_ids, sample_size))
part1_ids = set(random.sample(gene_ids, sample_size / 2))
part2_ids = gene_ids.difference(part1_ids)
with open(part1, "w") as part1_handle:
for gene in part1_ids:
for feature in db.children(gene):
part1_handle.write(str(feature) + "\n")
with open(part2, "w") as part2_handle:
for gene in part2_ids:
for feature in db.children(gene):
part2_handle.write(str(feature) + "\n")
return part1, part2 | split a GTF file into two equal parts, randomly selecting genes.
sample_size will select up to sample_size genes in total |
388,903 | def discretize(value, factor=100):
if not isinstance(value, Iterable):
return int(value * factor)
int_value = list(deepcopy(value))
for i in range(len(int_value)):
int_value[i] = int(int_value[i] * factor)
return int_value | Discretize the given value, pre-multiplying by the given factor |
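A short usage sketch for discretize above; the two imports are what the function body relies on (they are not shown in the row) and the values are illustrative.
from collections.abc import Iterable  # on older Python this was collections.Iterable
from copy import deepcopy
print(discretize(0.756))            # 75, i.e. int(0.756 * 100)
print(discretize([0.1, 0.25], 10))  # [1, 2]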
388,904 | def build_ricecooker_json_tree(args, options, metadata_provider, json_tree_path):
LOGGER.info()
channeldir = args[]
if channeldir.endswith(os.path.sep):
channeldir = channeldir.rstrip(os.path.sep)  # rstrip returns a new string, so reassign for the strip to take effect
channelparentdir, channeldirname = os.path.split(channeldir)
channel_info = metadata_provider.get_channel_info()
thumbnail_chan_path = channel_info.get(, None)
if thumbnail_chan_path:
thumbnail_rel_path = rel_path_from_chan_path(thumbnail_chan_path, metadata_provider.channeldir)
else:
thumbnail_rel_path = None
ricecooker_json_tree = dict(
dirname=channeldirname,
title=channel_info[],
description=channel_info[],
source_domain=channel_info[],
source_id=channel_info[],
language=channel_info[],
thumbnail=thumbnail_rel_path,
children=[],
)
channeldir = args[]
content_folders = sorted(os.walk(channeldir))
for rel_path, _subfolders, filenames in content_folders:
LOGGER.info( + str(rel_path))
if metadata_provider.has_exercises():
dir_chan_path = chan_path_from_rel_path(rel_path, metadata_provider.channeldir)
dir_path_tuple = path_to_tuple(dir_chan_path)
exercises_filenames = metadata_provider.get_exercises_for_dir(dir_path_tuple)
filenames.extend(exercises_filenames)
sorted_filenames = sorted(filenames)
process_folder(ricecooker_json_tree, rel_path, sorted_filenames, metadata_provider)
write_tree_to_json_tree(json_tree_path, ricecooker_json_tree)
LOGGER.info( + json_tree_path) | Download all categories, subpages, modules, and resources from open.edu. |
388,905 | def sample(self):
lenghts = []
angles = []
coordinates = []
fix = []
sample_size = int(round(self.trajLen_borders[self.drawFrom(, self.getrand())]))
coordinates.append([0, 0])
fix.append(1)
while len(coordinates) < sample_size:
if len(lenghts) == 0 and len(angles) == 0:
angle, length = self._draw(self)
else:
angle, length = self._draw(prev_angle = angles[-1],
prev_length = lenghts[-1])
x, y = self._calc_xy(coordinates[-1], angle, length)
coordinates.append([x, y])
lenghts.append(length)
angles.append(angle)
fix.append(fix[-1]+1)
return coordinates | Draws a trajectory length, first coordinates, lengths, angles and
length-angle-difference pairs according to the empirical distribution.
Each call creates one complete trajectory. |
388,906 | def generate(self):
observed_arr = None
for result_tuple in self.__feature_generator.generate():
observed_arr = result_tuple[0]
break
if self.noise_sampler is not None:
self.noise_sampler.output_shape = observed_arr.shape
observed_arr += self.noise_sampler.generate()
observed_arr = observed_arr.astype(float)
if self.__norm_mode == "z_score":
if observed_arr.std() != 0:
observed_arr = (observed_arr - observed_arr.mean()) / observed_arr.std()
elif self.__norm_mode == "min_max":
if (observed_arr.max() - observed_arr.min()) != 0:
observed_arr = (observed_arr - observed_arr.min()) / (observed_arr.max() - observed_arr.min())
elif self.__norm_mode == "tanh":
observed_arr = np.tanh(observed_arr)
return observed_arr | Draws samples from the `true` distribution.
Returns:
`np.ndarray` of samples. |
388,907 | def add_member_roles(self, guild_id: int, member_id: int, roles: List[int]):
current_roles = [role for role in self.get_guild_member_by_id(guild_id, member_id)['roles']]  # key elided in the source dump; Discord member objects carry role ids under 'roles'
roles.extend(current_roles)
new_list = list(set(roles))
self.set_member_roles(guild_id, member_id, new_list) | Add roles to a member
This method takes a list of **role ids** that you want to give to the user,
on top of whatever roles they may already have. This method will fetch
the user's current roles, and add to that list the roles passed in. The
user's resulting list of roles will not contain duplicates, so you don't have
to filter role ids to this method (as long as they're still roles for this guild).
This method differs from ``set_member_roles`` in that this method ADDS roles
to the user's current role list. ``set_member_roles`` is used by this method.
Args:
guild_id: snowflake id of the guild
member_id: snowflake id of the member
roles: list of snowflake ids of roles to add |
388,908 | def ConsultarCortes(self, sep="||"):
"Retorna listado de cortes -carnes- (código, descripción)"
ret = self.client.consultarCortes(
auth={
: self.Token, : self.Sign,
: self.Cuit, },
)[]
self.__analizar_errores(ret)
array = ret.get(, []) + ret.get(, [])
if sep is None:
return dict([(it[], it[]) for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it[], it[]) for it in array] | Retorna listado de cortes -carnes- (código, descripción) |
388,909 | def _realsize(self):
current = self
size= 0
while current is not None:
size += current._parser.sizeof(current)
last = current
current = getattr(current, , None)
size += len(getattr(last, , b))
return size | Get the struct size without padding (or the "real size")
:returns: the "real size" in bytes |
388,910 | def update_wallet(self, wallet_name, limit):
request = {
: {
: str(limit),
}
}
return make_request(
.format(self.url, wallet_name),
method=,
body=request,
timeout=self.timeout,
client=self._client) | Update a wallet with a new limit.
@param the name of the wallet.
@param the new value of the limit.
@return a success string from the plans server.
@raise ServerError via make_request. |
388,911 | def _build_archive(self, dir_path):
zip_path = os.path.join(dir_path, "import.zip")
archive = zipfile.ZipFile(zip_path, "w")
for filename in CSV_FILES:
filepath = os.path.join(dir_path, filename)
if os.path.exists(filepath):
archive.write(filepath, filename, zipfile.ZIP_DEFLATED)
archive.close()
with open(zip_path, "rb") as f:
body = f.read()
return body | Creates a zip archive from files in path. |
388,912 | def index_spacing(self, value):
if not isinstance(value, bool):
raise TypeError()
self._index_spacing = value | Validate and set the index_spacing flag. |
388,913 | def get_categories(self):
posts = self.get_posts(include_draft=True)
result = {}
for post in posts:
for category_name in set(post.categories):
result[category_name] = result.setdefault(
category_name,
Pair(0, 0)) + Pair(1, 0 if post.is_draft else 1)
return list(result.items()) | Get all categories and post count of each category.
:return dict_item(category_name, Pair(count_all, count_published)) |
388,914 | def get_parallel_value_for_key(self, key):
if self._remotelib:
return self._remotelib.run_keyword('get_parallel_value_for_key',  # keyword name elided in the source; assumed to mirror this method's name
[key], {})
return _PabotLib.get_parallel_value_for_key(self, key) | Get the value for a key. If there is no value for the key then empty
string is returned. |
388,915 | def delete_namespaced_cron_job(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True  # key elided in the source dump; this is the flag the generated client normally sets here
if kwargs.get('async_req'):  # key elided in the source dump; 'async_req' matches the parameter documented below
return self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs)
else:
(data) = self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs)
return data | delete_namespaced_cron_job # noqa: E501
delete a CronJob # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_cron_job(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CronJob (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread. |
388,916 | def write_contents_to_file(self, entities, path_patterns=None,
contents=None, link_to=None,
content_mode='text', conflicts='fail',  # default literals elided in the source; 'fail' per the conflicts docs below, 'text' assumed for content_mode
strict=False, domains=None, index=False,
index_domains=None):
path = self.build_path(entities, path_patterns, strict, domains)
if path is None:
raise ValueError("Cannot construct any valid filename for "
"the passed entities given available path "
"patterns.")
write_contents_to_file(path, contents=contents, link_to=link_to,
content_mode=content_mode, conflicts=conflicts,
root=self.root)
if index:
if index_domains is None:
index_domains = list(self.domains.keys())
self._index_file(self.root, path, index_domains) | Write arbitrary data to a file defined by the passed entities and
path patterns.
Args:
entities (dict): A dictionary of entities, with Entity names in
keys and values for the desired file in values.
path_patterns (list): Optional path patterns to use when building
the filename. If None, the Layout-defined patterns will be
used.
contents (object): Contents to write to the generate file path.
Can be any object serializable as text or binary data (as
defined in the content_mode argument).
conflicts (str): One of 'fail', 'skip', 'overwrite', or 'append'
that defines the desired action when the output path already
exists. 'fail' raises an exception; 'skip' does nothing;
'overwrite' overwrites the existing file; 'append' adds a suffix
to each file copy, starting with 1. Default is 'fail'.
strict (bool): If True, all entities must be matched inside a
pattern in order to be a valid match. If False, extra entities
will be ignored so long as all mandatory entities are found.
domains (list): List of Domains to scan for path_patterns. Order
determines precedence (i.e., earlier Domains will be scanned
first). If None, all available domains are included.
index (bool): If True, adds the generated file to the current
index using the domains specified in index_domains.
index_domains (list): List of domain names to attach the generated
file to when indexing. Ignored if index == False. If None,
All available domains are used. |
388,917 | def _complete_batch_send(self, resp):
self._batch_send_d = None
self._req_attempts = 0
self._retry_interval = self._init_retry_interval
if isinstance(resp, Failure) and not resp.check(tid_CancelledError,
CancelledError):
log.error("Failure detected in _complete_batch_send: %r\n%r",
resp, resp.getTraceback())
return | Complete the processing of our batch send operation
Clear the deferred tracking our current batch processing
and reset our retry count and retry interval
Return none to eat any errors coming from up the deferred chain |
388,918 | def dict_of_numpyarray_to_dict_of_list(d):
for key,value in d.iteritems():
if isinstance(value,dict):
d[key] = dict_of_numpyarray_to_dict_of_list(value)
elif isinstance(value,np.ndarray):
d[key] = value.tolist()
return d | Convert dictionary containing numpy arrays to dictionary containing lists
Parameters
----------
d : dict
sli parameter name and value as dictionary key and value pairs
Returns
-------
d : dict
modified dictionary |
388,919 | def evaluate_accuracy(data_iterator, net):
acc = mx.metric.Accuracy()
for data, label in data_iterator:
output = net(data)
predictions = nd.argmax(output, axis=1)
predictions = predictions.reshape((-1, 1))
acc.update(preds=predictions, labels=label)
return acc.get()[1] | Function to evaluate accuracy of any data iterator passed to it as an argument |
388,920 | def start(self) -> None:
if self.config:
self.websocket = self.ws_client(
self.loop, self.session, self.host,
self.config.websocketport, self.async_session_handler)
self.websocket.start()
else:
_LOGGER.error() | Connect websocket to deCONZ. |
388,921 | def PhyDMSComprehensiveParser():
parser = ArgumentParserNoArgHelp(description=("Comprehensive phylogenetic "
"model comparison and detection of selection informed by deep "
"mutational scanning data. This program runs repeatedly "
"to compare substitution models and detect selection. "
"{0} Version {1}. Full documentation at {2}").format(
phydmslib.__acknowledgments__, phydmslib.__version__,
phydmslib.__url__),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(, help=, type=str)
parser.add_argument(, help=
, type=ExistingFile)
parser.add_argument(, help=
, type=ExistingFile, nargs=)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(, help="Path to RAxML (e.g., )")
group.add_argument(, type=ExistingFile,
help="Existing Newick file giving input tree.")
parser.add_argument(, default=-1, help=
, type=int)
parser.add_argument(, choices=[, ],
default=, help=("How to handle branch lengths: "
"scale by single parameter or optimize each one"))
parser.set_defaults(omegabysite=False)
parser.add_argument(, dest=,
action=, help="Fit omega (dN/dS) for each site.")
parser.set_defaults(diffprefsbysite=False)
parser.add_argument(, dest=,
action=, help="Fit differential preferences for "
"each site.")
parser.set_defaults(gammaomega=False)
parser.add_argument(, dest=, action=\
, help="Fit ExpCM with gamma distributed omega.")
parser.set_defaults(gammabeta=False)
parser.add_argument(, dest=, action=\
, help="Fit ExpCM with gamma distributed beta.")
parser.set_defaults(noavgprefs=False)
parser.add_argument(, dest=, action=,
help="No fitting of models with preferences averaged across sites "
"for ExpCM.")
parser.set_defaults(randprefs=False)
parser.add_argument(, dest=, action=,
help="Include ExpCM models with randomized preferences.")
parser.add_argument(, , action=, version=
.format(version=phydmslib.__version__))
return parser | Returns *argparse.ArgumentParser* for ``phdyms_comprehensive`` script. |
388,922 | def next(self):
while True:
self.cur_idx += 1
if self.__datasource.populate_iteration(self):
return self
raise StopIteration | Move to the next valid locus.
Will only return valid loci or exit via StopIteration exception |
388,923 | def _fix_typo(s):
subst, attr, mode = s
return m(subst, attr, script("t.-x.-s.y.-'")) | M:.-O:.-'M:.-wa.e.-'t.x.-s.y.-', => M:.-O:.-'M:.-wa.e.-'t.-x.-s.y.-', |
388,924 | def exit_and_fail(self, msg=None, out=None):
self.exit(result=PANTS_FAILED_EXIT_CODE, msg=msg, out=out) | Exits the runtime with a nonzero exit code, indicating failure.
:param msg: A string message to print to stderr or another custom file descriptor before exiting.
(Optional)
:param out: The file descriptor to emit `msg` to. (Optional) |
388,925 | def host_members(self):
host = self.host()
if host is None:
return
for member, full_member in host.members_objects:
yield full_member | Return the members of the host committee. |
388,926 | def xpathNextPreceding(self, cur):
if cur is None: cur__o = None
else: cur__o = cur._o
ret = libxml2mod.xmlXPathNextPreceding(self._o, cur__o)
if ret is None:raise xpathError()
__tmp = xmlNode(_obj=ret)
return __tmp | Traversal function for the "preceding" direction the
preceding axis contains all nodes in the same document as
the context node that are before the context node in
document order, excluding any ancestors and excluding
attribute nodes and namespace nodes; the nodes are ordered
in reverse document order |
388,927 | def get_resource_group(access_token, subscription_id, rgname):
endpoint = ''.join([get_rm_endpoint(),  # path literals elided in the source dump; the segments below follow the usual Azure RM URL layout and are a best guess
'/subscriptions/', subscription_id,
'/resourceGroups/', rgname,
'?api-version=', RESOURCE_API])
return do_get(endpoint, access_token) | Get details about the named resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body. |
388,928 | def auth(self):
if not self.store_handler.has_value():
params = {}
params["access_key"] = self.access_key
params["secret_key"] = self.secret_key
params["app_id"] = self.app_id
params["device_id"] = self.device_id
auth_url = self.api_path + "auth/"
response = self.get_response(auth_url, params, "post")
if in response:
self.store_handler.set_value("access_token", response[][])
self.store_handler.set_value("expires", response[][])
logger.info()
else:
msg = "Error getting access_token, " + \
"please verify your access_key, secret_key and app_id"
logger.error(msg)
raise Exception("Auth Failed, please check your access details") | Auth is used to call the AUTH API of CricketAPI.
Access token required for every request call to CricketAPI.
Auth functional will post user Cricket API app details to server
and return the access token.
Return:
Access token |
388,929 | def fcm_send_single_device_data_message(
registration_id,
condition=None,
collapse_key=None,
delay_while_idle=False,
time_to_live=None,
restricted_package_name=None,
low_priority=False,
dry_run=False,
data_message=None,
content_available=None,
api_key=None,
timeout=5,
json_encoder=None):
push_service = FCMNotification(
api_key=SETTINGS.get("FCM_SERVER_KEY") if api_key is None else api_key,
json_encoder=json_encoder,
)
return push_service.single_device_data_message(
registration_id=registration_id,
condition=condition,
collapse_key=collapse_key,
delay_while_idle=delay_while_idle,
time_to_live=time_to_live,
restricted_package_name=restricted_package_name,
low_priority=low_priority,
dry_run=dry_run,
data_message=data_message,
content_available=content_available,
timeout=timeout
) | Send push message to a single device
All arguments correspond to that defined in pyfcm/fcm.py.
Args:
registration_id (str): FCM device registration IDs.
data_message (dict): Data message payload to send alone or with the
notification message
Keyword Args:
collapse_key (str, optional): Identifier for a group of messages
that can be collapsed so that only the last message gets sent
when delivery can be resumed. Defaults to ``None``.
delay_while_idle (bool, optional): If ``True`` indicates that the
message should not be sent until the device becomes active.
time_to_live (int, optional): How long (in seconds) the message
should be kept in FCM storage if the device is offline. The
maximum time to live supported is 4 weeks. Defaults to ``None``
which uses the FCM default of 4 weeks.
low_priority (boolean, optional): Whether to send notification with
the low priority flag. Defaults to ``False``.
restricted_package_name (str, optional): Package name of the
application where the registration IDs must match in order to
receive the message. Defaults to ``None``.
dry_run (bool, optional): If ``True`` no message will be sent but
request will be tested.
timeout (int, optional): set time limit for the request
Returns:
:dict:`multicast_id(long), success(int), failure(int),
canonical_ids(int), results(list)`:
Response from FCM server.
Raises:
AuthenticationError: If :attr:`api_key` is not set or provided or there
is an error authenticating the sender.
FCMServerError: Internal server error or timeout error on Firebase cloud
messaging server
InvalidDataError: Invalid data provided
InternalPackageError: Mostly from changes in the response of FCM,
contact the project owner to resolve the issue |
388,930 | def nltk_tree_to_logical_form(tree: Tree) -> str:
| Given an ``nltk.Tree`` representing the syntax tree that generates a logical form, this method
produces the actual (lisp-like) logical form, with all of the non-terminal symbols converted
into the correct number of parentheses.
This is used in the logic that converts action sequences back into logical forms. It's very
unlikely that you will need this anywhere else. |
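The body of this function did not survive extraction; only the signature and docstring remain. A minimal sketch of the recursion the docstring describes (a best-guess illustration, not the original implementation):
from nltk import Tree

def nltk_tree_to_logical_form_sketch(tree) -> str:
    if isinstance(tree, str):
        return tree  # a leaf is a plain token
    if len(tree) == 1:
        return nltk_tree_to_logical_form_sketch(tree[0])  # unwrap single-child non-terminals
    # parenthesize the converted children, lisp-style
    return "(" + " ".join(nltk_tree_to_logical_form_sketch(child) for child in tree) + ")"

print(nltk_tree_to_logical_form_sketch(Tree.fromstring("(S (F add) (A 1) (A 2))")))  # (add 1 2)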
388,931 | def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
now = time.time()
r = []
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
return "\n".join(r+[""]) | Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.
ignore_discard and ignore_expires: see docstring for FileCookieJar.save |
388,932 | def cmd_tracker_mode(self, args):
connection = self.find_connection()
if not connection:
print("No antenna tracker found")
return
mode_mapping = connection.mode_mapping()
if mode_mapping is None:
print()
return
if len(args) != 1:
print(, mode_mapping.keys())
return
mode = args[0].upper()
if mode not in mode_mapping:
print( % mode)
return
connection.set_mode(mode_mapping[mode]) | set arbitrary mode |
388,933 | def setnx(self, key, value):
fut = self.execute(b'SETNX', key, value)  # command name elided in the source; SETNX matches the method and docstring
return wait_convert(fut, bool) | Set the value of a key, only if the key does not exist. |
388,934 | def set_check(self, name, state):
if self.child.is_alive():
self.parent_pipe.send(CheckItem(name, state)) | set a status value |
388,935 | def convert_to_unicode( tscii_input ):
output = list()
prev = None
prev2x = None
for char in tscii_input:
if ord(char) < 128 :
output.append( char )
prev = None
prev2x = None
elif ord(char) in TSCII_DIRECT_LOOKUP:
if ( prev in TSCII_PRE_MODIFIER ):
curr_char = [TSCII[ord(char)],TSCII[prev]]
else:
curr_char = [TSCII[ord(char)]]
char = None
output.extend( curr_char )
elif ( (ord(char) in TSCII_POST_MODIFIER) ):
if ( (prev in TSCII_DIRECT_LOOKUP) and
(prev2x in TSCII_PRE_MODIFIER) ):
if len(output) >= 2:
del output[-1]
del output[-2]
elif len(output)==1:
del output[-1]
else:
pass
output.extend( [TSCII[prev], TSCII[prev2x]] )
else:
print("Warning: malformed TSCII encoded file; skipping characters")
prev = None
char = None
else:
pass
prev2x = prev
if char:
prev = ord(char)
return u"".join(output) | convert a byte-ASCII encoded string into equivalent Unicode string
in the UTF-8 notation. |
388,936 | def banner(*lines, **kwargs):
sep = kwargs.get("sep", "*")
count = kwargs.get("width", globals()["WIDTH"])
out(sep * count)
if lines:
out(sep)
for line in lines:
out("{} {}".format(sep, line))
out(sep)
out(sep * count) | prints a banner
sep -- string -- the character that will be on the line on the top and bottom
and before any of the lines, defaults to *
count -- integer -- the line width, defaults to 80 |
388,937 | def is_subdir(a, b):
a, b = map(os.path.abspath, [a, b])
return os.path.commonpath([a, b]) == b | Return true if a is a subdirectory of b |
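A quick usage sketch (paths are illustrative); note that os.path.commonpath needs Python 3.4+ and raises ValueError for paths on different Windows drives.
import os
print(is_subdir("/home/user/project/src", "/home/user/project"))  # True
print(is_subdir("/home/user/project", "/tmp"))                    # False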
388,938 | def _sanitize_parameters(self):
if isinstance(self._additional_edges, (list, set, tuple)):
new_dict = defaultdict(list)
for s, d in self._additional_edges:
new_dict[s].append(d)
self._additional_edges = new_dict
elif isinstance(self._additional_edges, dict):
pass
else:
raise AngrCFGError()
if self._advanced_backward_slicing and self._enable_symbolic_back_traversal:
raise AngrCFGError()
if self._advanced_backward_slicing and not self._keep_state:
raise AngrCFGError()
self._avoid_runs = [ ] if self._avoid_runs is None else self._avoid_runs
if not isinstance(self._avoid_runs, (list, set)):
raise AngrCFGError()
self._sanitize_starts() | Perform a sanity check on parameters passed in to CFG.__init__().
An AngrCFGError is raised if any parameter fails the sanity check.
:return: None |
388,939 | def mk_subsuper_association(m, r_subsup):
r_rel = one(r_subsup).R_REL[206]()
r_rto = one(r_subsup).R_SUPER[212].R_RTO[204]()
target_o_obj = one(r_rto).R_OIR[203].O_OBJ[201]()
for r_sub in many(r_subsup).R_SUB[213]():
r_rgo = one(r_sub).R_RGO[205]()
source_o_obj = one(r_rgo).R_OIR[203].O_OBJ[201]()
source_ids, target_ids = _get_related_attributes(r_rgo, r_rto)
m.define_association(rel_id=r_rel.Numb,
source_kind=source_o_obj.Key_Lett,
target_kind=target_o_obj.Key_Lett,
source_keys=source_ids,
target_keys=target_ids,
source_conditional=True,
target_conditional=False,
source_phrase='',  # phrase strings elided in the source dump; empty strings keep the call valid
target_phrase='',
source_many=False,
target_many=False) | Create pyxtuml associations from a sub/super association in BridgePoint. |
388,940 | def get_template_data(template_file):
if not pathlib.Path(template_file).exists():
raise ValueError("Template file not found at {}".format(template_file))
with open(template_file, 'r') as fp:  # mode elided in the source; text read mode assumed
try:
return yaml_parse(fp.read())
except (ValueError, yaml.YAMLError) as ex:
raise ValueError("Failed to parse template: {}".format(str(ex))) | Read the template file, parse it as JSON/YAML and return the template as a dictionary.
Parameters
----------
template_file : string
Path to the template to read
Returns
-------
Template data as a dictionary |
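A hedged usage sketch; "template.yaml" is a placeholder path, and the printed keys assume a CloudFormation/SAM-style template.
try:
    template = get_template_data("template.yaml")  # hypothetical path
    print(sorted(template.get("Resources", {})))   # logical resource ids, if any
except ValueError as err:
    print("Could not load template:", err)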
388,941 | def _parse_request(self, schema, req, locations):
if schema.many:
assert (
"json" in locations
), "schema.many=True is only supported for JSON location"
parsed = self.parse_arg(
name="json",
field=ma.fields.Nested(schema, many=True),
req=req,
locations=locations,
)
if parsed is missing:
parsed = []
else:
argdict = schema.fields
parsed = {}
for argname, field_obj in iteritems(argdict):
if MARSHMALLOW_VERSION_INFO[0] < 3:
parsed_value = self.parse_arg(argname, field_obj, req, locations)
if parsed_value is missing and field_obj.load_from:
parsed_value = self.parse_arg(
field_obj.load_from, field_obj, req, locations
)
argname = field_obj.load_from
else:
argname = field_obj.data_key or argname
parsed_value = self.parse_arg(argname, field_obj, req, locations)
if parsed_value is not missing:
parsed[argname] = parsed_value
return parsed | Return a parsed arguments dictionary for the current request. |
388,942 | def get_normal_draws(num_mixers,
num_draws,
num_vars,
seed=None):
assert all([isinstance(x, int) for x in [num_mixers, num_draws, num_vars]])
assert all([x > 0 for x in [num_mixers, num_draws, num_vars]])
if seed is not None:
assert isinstance(seed, int) and seed > 0
normal_dist = scipy.stats.norm(loc=0.0, scale=1.0)
all_draws = []
if seed:
np.random.seed(seed)
for i in xrange(num_vars):
all_draws.append(normal_dist.rvs(size=(num_mixers, num_draws)))
return all_draws | Parameters
----------
num_mixers : int.
Should be greater than zero. Denotes the number of observations for
which we are making draws from a normal distribution for. I.e. the
number of observations with randomly distributed coefficients.
num_draws : int.
Should be greater than zero. Denotes the number of draws that are to be
made from each normal distribution.
num_vars : int.
Should be greater than zero. Denotes the number of variables for which
we need to take draws from the normal distribution.
seed : int or None, optional.
If an int is passed, it should be greater than zero. Denotes the value
to be used in seeding the random generator used to generate the draws
from the normal distribution. Default == None.
Returns
-------
all_draws : list of 2D ndarrays.
The list will have num_vars elements. Each element will be a num_obs by
num_draws numpy array of draws from a normal distribution with mean
zero and standard deviation of one. |
388,943 | def create_and_register_access97_db(filename: str,
dsn: str,
description: str) -> bool:
fullfilename = os.path.abspath(filename)
create_string = fullfilename + " General"
return (create_user_dsn(access_driver, CREATE_DB3=create_string) and
register_access_db(filename, dsn, description)) | (Windows only.)
Creates a Microsoft Access 97 database and registers it with ODBC.
Args:
filename: filename of the database to create
dsn: ODBC data source name to create
description: description of the database
Returns:
bool: was the DSN created? |
388,944 | def domain_create(auth=None, **kwargs):
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_domain(**kwargs) | Create a domain
CLI Example:
.. code-block:: bash
salt '*' keystoneng.domain_create name=domain1 |
388,945 | def Softmax(x, params, axis=-1, **kwargs):
del params, kwargs
return np.exp(x - backend.logsumexp(x, axis, keepdims=True)) | Apply softmax to x: exponentiate and normalize along the given axis. |
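The exp(x - logsumexp(x)) form above is the numerically stable way to normalize. A plain NumPy sketch of the same idea, for comparison:
import numpy as np

def softmax_sketch(x, axis=-1):
    shifted = x - np.max(x, axis=axis, keepdims=True)  # shift by the max so exp() cannot overflow
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=axis, keepdims=True)

print(softmax_sketch(np.array([1.0, 2.0, 3.0])))  # entries sum to 1.0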
388,946 | def ParseRow(self, parser_mediator, row_offset, row):
timestamp = self._ParseTimestamp(parser_mediator, row)
if timestamp is None:
return
event_data = TrendMicroUrlEventData()
event_data.offset = row_offset
for field in (
, , ,
, ):
try:
value = int(row[field], 10)
except (ValueError, TypeError):
value = None
setattr(event_data, field, value)
for field in (, , , , ):
setattr(event_data, field, row[field])
event = time_events.DateTimeValuesEvent(
timestamp, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a line of the log file and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
row_offset (int): line number of the row.
row (dict[str, str]): fields of a single row, as specified in COLUMNS. |
388,947 | def _create_cell(args, cell_body):
name = args.get()
if name is None:
raise Exception("Pipeline name was not specified.")
pipeline_spec = google.datalab.utils.commands.parse_config(
cell_body, google.datalab.utils.commands.notebook_environment())
airflow_spec = google.datalab.contrib.pipeline._pipeline.PipelineGenerator.generate_airflow_spec(
name, pipeline_spec)
debug = args.get('debug')  # key elided in the source; 'debug' inferred from the variable name
if debug is True:
return airflow_spec | Implements the pipeline cell create magic used to create Pipeline objects.
The supported syntax is:
%%pipeline create <args>
[<inline YAML>]
Args:
args: the arguments following '%%pipeline create'.
cell_body: the contents of the cell |
388,948 | def remove_attr(self, attr):
self._stable = False
self.attrs.pop(attr, None)
return self | Removes an attribute. |
388,949 | def init_widget(self):
super(AndroidChronometer, self).init_widget()
w = self.widget
w.setOnChronometerTickListener(w.getId())
w.onChronometerTick.connect(self.on_chronometer_tick) | Initialize the underlying widget. |
388,950 | def getlist(self, section, option):
value_list = self.get(section, option)
values = []
for value_line in value_list.split('\n'):  # separators elided in the source; newline-then-comma splitting taken from the docstring
for value in value_line.split(','):
value = value.strip()
if value:
values.append(value)
return values | Read a list of strings.
The value of `section` and `option` is treated as a comma- and newline-
separated list of strings. Each value is stripped of whitespace.
Returns the list of strings. |
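The splitting the method performs (newline first, then comma, strip whitespace, drop empties) can be illustrated standalone:
value_list = "pkg_a, pkg_b\npkg_c\n"
values = [v.strip() for line in value_list.split('\n') for v in line.split(',') if v.strip()]
print(values)  # ['pkg_a', 'pkg_b', 'pkg_c']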
388,951 | def yn_prompt(text):
text = "\n" + text + "\n(y or n): "  # prompt literals elided in the source; 'y'/'n' restored per the docstring
while True:
answer = input(text).strip()
if answer != 'y' and answer != 'n':  # answer literals elided in the source; 'y'/'n' taken from the docstring
continue
elif answer == 'y':
return True
elif answer == 'n':
return False | Takes the text prompt, and presents it, takes only "y" or "n" for
answers, and returns True or False. Repeats itself on bad input. |
388,952 | def find_declaration(self):
if not self.is_plt:
binary_name = self.binary_name
if binary_name not in SIM_LIBRARIES:
return
else:
binary_name = None
edges = self.transition_graph.edges()
node = next(iter(edges))[1]
if len(edges) == 1 and (type(node) is HookNode or type(node) is SyscallNode):
target = node.addr
if target in self._function_manager:
target_func = self._function_manager[target]
binary_name = target_func.binary_name
if binary_name is None:
return
library = SIM_LIBRARIES.get(binary_name, None)
if library is None:
return
if not library.has_prototype(self.name):
return
proto = library.prototypes[self.name]
self.prototype = proto
if self.calling_convention is not None:
self.calling_convention.args = None
self.calling_convention.func_ty = proto | Find the most likely function declaration from the embedded collection of prototypes, set it to self.prototype,
and update self.calling_convention with the declaration.
:return: None |
388,953 | def _reset_problem_type(self):
if self._solve_count > 0:
integer_count = 0
for func in (self._cp.variables.get_num_binary,
self._cp.variables.get_num_integer,
self._cp.variables.get_num_semicontinuous,
self._cp.variables.get_num_semiinteger):
integer_count += func()
integer = integer_count > 0
quad_constr = self._cp.quadratic_constraints.get_num() > 0
quad_obj = self._cp.objective.get_num_quadratic_variables() > 0
if not integer:
if quad_constr:
new_type = self._cp.problem_type.QCP
elif quad_obj:
new_type = self._cp.problem_type.QP
else:
new_type = self._cp.problem_type.LP
else:
if quad_constr:
new_type = self._cp.problem_type.MIQCP
elif quad_obj:
new_type = self._cp.problem_type.MIQP
else:
new_type = self._cp.problem_type.MILP
logger.debug(.format(
self._cp.problem_type[new_type]))
self._cp.set_problem_type(new_type)
else:
logger.debug(.format(
self._cp.problem_type[self._cp.get_problem_type()]))
quad_obj = self._cp.objective.get_num_quadratic_variables() > 0
if hasattr(self._cp.parameters, 'optimalitytarget'):  # attribute name elided in the source; inferred from the branch below
target_param = self._cp.parameters.optimalitytarget
else:
target_param = self._cp.parameters.solutiontarget
if quad_obj:
target_param.set(target_param.values.optimal_global)
else:
target_param.set(target_param.values.auto) | Reset problem type to whatever is appropriate. |
388,954 | def dict_contents(self, use_dict=None, as_class=None):
if _debug: _log.debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
return str(self) | Return the contents of an object as a dict. |
388,955 | def rdf_source(self, aformat="turtle"):
if aformat and aformat not in self.SUPPORTED_FORMATS:
return "Sorry. Allowed formats are %s" % str(self.SUPPORTED_FORMATS)
if aformat == "dot":
return self.__serializedDot()
else:
return self.rdflib_graph.serialize(format=aformat) | Serialize graph using the format required |
388,956 | def find_xml_all(cls, url, markup, tag, pattern):
body = cls.find_xml(url, markup)
return body.find_all(tag, href=re.compile(pattern)) | find xml(list)
:param url: contents url
:param markup: markup provider
:param tag: find tag
:param pattern: xml file pattern
:return: BeautifulSoup object list |
388,957 | def seq_minibatches(inputs, targets, batch_size, seq_length, stride=1):
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
n_loads = (batch_size * stride) + (seq_length - stride)
for start_idx in range(0, len(inputs) - n_loads + 1, (batch_size * stride)):
seq_inputs = np.zeros((batch_size, seq_length) + inputs.shape[1:], dtype=inputs.dtype)
seq_targets = np.zeros((batch_size, seq_length) + targets.shape[1:], dtype=targets.dtype)
for b_idx in xrange(batch_size):
start_seq_idx = start_idx + (b_idx * stride)
end_seq_idx = start_seq_idx + seq_length
seq_inputs[b_idx] = inputs[start_seq_idx:end_seq_idx]
seq_targets[b_idx] = targets[start_seq_idx:end_seq_idx]
flatten_inputs = seq_inputs.reshape((-1, ) + inputs.shape[1:])
flatten_targets = seq_targets.reshape((-1, ) + targets.shape[1:])
yield flatten_inputs, flatten_targets | Generate a generator that return a batch of sequence inputs and targets.
If `batch_size=100` and `seq_length=5`, one return will have 500 rows (examples).
Parameters
----------
inputs : numpy.array
The input features, every row is a example.
targets : numpy.array
The labels of inputs, every element is a example.
batch_size : int
The batch size.
seq_length : int
The sequence length.
stride : int
The stride step, default is 1.
Examples
--------
Synced sequence input and output.
>>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])
>>> y = np.asarray([0, 1, 2, 3, 4, 5])
>>> for batch in tl.iterate.seq_minibatches(inputs=X, targets=y, batch_size=2, seq_length=2, stride=1):
>>> print(batch)
(array([['a', 'a'], ['b', 'b'], ['b', 'b'], ['c', 'c']], dtype='<U1'), array([0, 1, 1, 2]))
(array([['c', 'c'], ['d', 'd'], ['d', 'd'], ['e', 'e']], dtype='<U1'), array([2, 3, 3, 4]))
Many to One
>>> return_last = True
>>> num_steps = 2
>>> X = np.asarray([['a','a'], ['b','b'], ['c','c'], ['d','d'], ['e','e'], ['f','f']])
>>> Y = np.asarray([0,1,2,3,4,5])
>>> for batch in tl.iterate.seq_minibatches(inputs=X, targets=Y, batch_size=2, seq_length=num_steps, stride=1):
>>> x, y = batch
>>> if return_last:
>>> tmp_y = y.reshape((-1, num_steps) + y.shape[1:])
>>> y = tmp_y[:, -1]
>>> print(x, y)
[['a' 'a']
['b' 'b']
['b' 'b']
['c' 'c']] [1 2]
[['c' 'c']
['d' 'd']
['d' 'd']
['e' 'e']] [3 4] |
388,958 | def filter_for_ignored_ext(result, ignored_ext, ignore_copying):
if ignore_copying:
log = LoggingMixin().log
regex_builder = r"^.*\.(%s$)$" % '|'.join(ignored_ext)  # join separator elided in the source; '|' builds a regex alternation over the ignored extensions
ignored_extensions_regex = re.compile(regex_builder)
log.debug(
,
ignored_extensions_regex.pattern, map(lambda x: x[], result)
)
result = [x for x in result if not ignored_extensions_regex.match(x['path'])]  # dict key elided in the source; Snakebite ls entries carry a 'path' field
log.debug(, result)
return result | Will filter if instructed to do so the result to remove matching criteria
:param result: list of dicts returned by Snakebite ls
:type result: list[dict]
:param ignored_ext: list of ignored extensions
:type ignored_ext: list
:param ignore_copying: shall we ignore ?
:type ignore_copying: bool
:return: list of dicts which were not removed
:rtype: list[dict] |
388,959 | def save(self, ts):
with open(self, ) as f:
Timestamp.wrap(ts).dump(f) | Save timestamp to file. |
388,960 | def _cur_band_filled(self):
cur_band = self._hyperbands[self._state["band_idx"]]
return len(cur_band) == self._s_max_1 | Checks if the current band is filled.
The size of the current band should be equal to s_max_1 |
388,961 | def extract(cls, padded):
if padded.startswith("@{") and padded.endswith("}"):
return padded[2:len(padded)-1]
else:
return padded | Removes the surrounding "@{...}" from the name.
:param padded: the padded string
:type padded: str
:return: the extracted name
:rtype: str |
388,962 | def remove(self, data):
current_node = self._first_node
deleted = False
if self._size == 0:
return
if data == current_node.data():
if current_node.next() is None:
self._first_node = LinkedListNode(None, None)
self._last_node = self._first_node
self._size = 0
return
current_node = current_node.next()
self._first_node = current_node
self._size -= 1
return
while True:
if current_node is None:
deleted = False
break
next_node = current_node.next()
if next_node is not None:
if data == next_node.data():
next_next_node = next_node.next()
current_node.update_next(next_next_node)
next_node = None
deleted = True
break
current_node = current_node.next()
if deleted:
self._size -= 1 | Removes a data node from the list. If the list contains more than one
node having the same data that shall be removed, then the node having
the first occurrency of the data is removed.
:param data: the data to be removed in the new list node
:type data: object |
388,963 | def enqueue(self, item_type, item):
with self.enlock:
self.queue[item_type].append(item) | Queue a new data item, make item iterable |
388,964 | def astra_projector(vol_interp, astra_vol_geom, astra_proj_geom, ndim, impl):
if vol_interp not in (, ):
raise ValueError("`vol_interp` not understood"
.format(vol_interp))
impl = str(impl).lower()
if impl not in (, ):
raise ValueError("`impl` not understood"
.format(impl))
if not in astra_proj_geom:
raise ValueError(
.format(astra_proj_geom))
if ndim == 3 and impl == :
raise ValueError()
ndim = int(ndim)
proj_type = astra_proj_geom[]
if proj_type not in (, , ,
, , , ):
raise ValueError(.format(proj_type))
type_map_cpu = {: {: ,
: },
: {: ,
: },
: {: ,
: },
: {: ,
: }}
type_map_cpu[] = type_map_cpu[]
type_map_cpu[] = type_map_cpu[]
type_map_cpu[] = type_map_cpu[]
type_map_cuda = {: ,
: }
type_map_cuda[] = type_map_cuda[]
type_map_cuda[] = type_map_cuda[]
type_map_cuda[] = type_map_cuda[]
type_map_cuda[] = type_map_cuda[]
type_map_cuda[] = type_map_cuda[]
proj_cfg = {}
if impl == :
proj_cfg[] = type_map_cpu[proj_type][vol_interp]
else:
proj_cfg[] = type_map_cuda[proj_type]
proj_cfg[] = astra_vol_geom
proj_cfg[] = astra_proj_geom
proj_cfg[] = {}
if (proj_type in (, ) and
astra_supports()):
proj_cfg[][] = True
if ndim == 2:
return astra.projector.create(proj_cfg)
else:
return astra.projector3d.create(proj_cfg) | Create an ASTRA projector configuration dictionary.
Parameters
----------
vol_interp : {'nearest', 'linear'}
Interpolation type of the volume discretization. This determines
the projection model that is chosen.
astra_vol_geom : dict
ASTRA volume geometry.
astra_proj_geom : dict
ASTRA projection geometry.
ndim : {2, 3}
Number of dimensions of the projector.
impl : {'cpu', 'cuda'}
Implementation of the projector.
Returns
-------
proj_id : int
Handle for the created ASTRA internal projector object. |
388,965 | def capture_output(self, with_hook=True):
self.hooked = ''  # start as an empty string; the display hook below appends to it
def display_hook(obj):
self.hooked += self.safe_better_repr(obj)
self.last_obj = obj
stdout, stderr = sys.stdout, sys.stderr
if with_hook:
d_hook = sys.displayhook
sys.displayhook = display_hook
sys.stdout, sys.stderr = StringIO(), StringIO()
out, err = [], []
try:
yield out, err
finally:
out.extend(sys.stdout.getvalue().splitlines())
err.extend(sys.stderr.getvalue().splitlines())
if with_hook:
sys.displayhook = d_hook
sys.stdout, sys.stderr = stdout, stderr | Steal stream output, return them in string, restore them |
388,966 | def process_placeholder_image(self):
if self.placeholder_image_name:
return
placeholder_image_name = None
placeholder_image = self.placeholder_image
if placeholder_image:
if isinstance(placeholder_image, OnStoragePlaceholderImage):
name = placeholder_image.path
else:
name = placeholder_image.image_data.name
placeholder_image_name = os.path.join(
VERSATILEIMAGEFIELD_PLACEHOLDER_DIRNAME, name
)
if not self.storage.exists(placeholder_image_name):
self.storage.save(
placeholder_image_name,
placeholder_image.image_data
)
self.placeholder_image_name = placeholder_image_name | Process the field's placeholder image.
Ensures the placeholder image has been saved to the same storage class
as the field in a top level folder with a name specified by
settings.VERSATILEIMAGEFIELD_SETTINGS['placeholder_directory_name']
This should be called by the VersatileImageFileDescriptor __get__.
If self.placeholder_image_name is already set it just returns right away. |
388,967 | def search_related(self, request):
logger.debug("Cache Search Request")
if self.cache.is_empty() is True:
logger.debug("Empty Cache")
return None
result = []
items = list(self.cache.cache.items())
for key, item in items:
element = self.cache.get(item.key)
logger.debug("Element : {elm}".format(elm=str(element)))
if request.proxy_uri == element.uri:
result.append(item)
return result | extracting everything from the cache |
388,968 | def random_string(length=6, alphabet=string.ascii_letters+string.digits):
return .join([random.choice(alphabet) for i in xrange(length)]) | Return a random string of given length and alphabet.
Default alphabet is url-friendly (base62). |
388,969 | def authenticate_server(self, response):
log.debug("authenticate_server(): Authenticate header: {0}".format(
_negotiate_value(response)))
host = urlparse(response.url).hostname
try:
if self.cbt_struct:
result = kerberos.authGSSClientStep(self.context[host],
_negotiate_value(response),
channel_bindings=self.cbt_struct)
else:
result = kerberos.authGSSClientStep(self.context[host],
_negotiate_value(response))
except kerberos.GSSError:
log.exception("authenticate_server(): authGSSClientStep() failed:")
return False
if result < 1:
log.error("authenticate_server(): authGSSClientStep() failed: "
"{0}".format(result))
return False
log.debug("authenticate_server(): returning {0}".format(response))
return True | Uses GSSAPI to authenticate the server.
Returns True on success, False on failure. |
388,970 | def get_jobs_from_argument(self, raw_job_string):
jobs = []
if raw_job_string.startswith(":"):
job_keys = raw_job_string.strip(" :")
jobs.extend([job for job in self.jobs(job_keys)])
else:
assert "/" in raw_job_string, "Job Code {0} is improperly formatted!".format(raw_job_string)
host, job_name = raw_job_string.rsplit("/", 1)
host_url = self._config_dict.get(host, {}).get(, host)
host = self.get_host(host_url)
if host.has_job(job_name):
jobs.append(JenksJob(None, host, job_name,
lambda: self._get_job_api_instance(host_url, job_name)))
else:
raise JenksDataException(
"Could not find Job {0}/{1}!".format(host, job_name))
return jobs | return a list of jobs corresponding to the raw_job_string |
388,971 | def get_all_events(self):
self.all_events = {}
events = self.tree.execute("$.events.frames")
if events is None:
return
for e in events:
event_type = e.get()
frame_id = e.get()
try:
self.all_events[event_type].append(frame_id)
except KeyError:
self.all_events[event_type] = [frame_id] | Gather all event IDs in the REACH output by type.
These IDs are stored in the self.all_events dict. |
388,972 | def chisquare(n_ij, weighted):
if weighted:
m_ij = n_ij / n_ij
nan_mask = np.isnan(m_ij)
m_ij[nan_mask] = 0.000001
w_ij = m_ij
n_ij_col_sum = n_ij.sum(axis=1)
n_ij_row_sum = n_ij.sum(axis=0)
alpha, beta, eps = (1, 1, 1)
while eps > 10e-6:
alpha = alpha * np.vstack(n_ij_col_sum / m_ij.sum(axis=1))
beta = n_ij_row_sum / (alpha * w_ij).sum(axis=0)
eps = np.max(np.absolute(w_ij * alpha * beta - m_ij))
m_ij = w_ij * alpha * beta
else:
m_ij = (np.vstack(n_ij.sum(axis=1)) * n_ij.sum(axis=0)) / n_ij.sum().astype(float)
dof = (n_ij.shape[0] - 1) * (n_ij.shape[1] - 1)
chi, p_val = stats.chisquare(n_ij, f_exp=m_ij, ddof=n_ij.size - 1 - dof, axis=None)
return (chi, p_val, dof) | Calculates the chisquare for a matrix of ind_v x dep_v
for the unweighted and SPSS weighted case |
388,973 | def Cp(self, T):
result = 0.0
for c, e in zip(self._coefficients, self._exponents):
result += c*T**e
return result | Calculate the heat capacity of the compound phase.
:param T: [K] temperature
:returns: [J/mol/K] Heat capacity. |
388,974 | def url_join(base, *args):
scheme, netloc, path, query, fragment = urlsplit(base)
path = path if len(path) else "/"
path = posixpath.join(path, *[('%s' % x) for x in args])  # format string elided in the source; '%s' assumed, coercing each segment to str
return urlunsplit([scheme, netloc, path, query, fragment]) | Helper function to join an arbitrary number of url segments together. |
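A usage sketch (host and path are illustrative); with the '%s' formatting assumed above, non-string segments such as integers are coerced to text.
print(url_join("https://api.example.com/v1", "users", 42, "posts"))
# https://api.example.com/v1/users/42/posts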
388,975 | def startGraph(self):
g = r.Graph()
g.namespace_manager.bind("rdf", r.namespace.RDF)
g.namespace_manager.bind("foaf", r.namespace.FOAF)
g.namespace_manager.bind("xsd", r.namespace.XSD)
g.namespace_manager.bind("opa", "http://purl.org/socialparticipation/opa/")
g.namespace_manager.bind("ops", "http://purl.org/socialparticipation/ops/")
g.namespace_manager.bind("wsg", "http://www.w3.org/2003/01/geo/wgs84_pos
g.namespace_manager.bind("dc2", "http://purl.org/dc/elements/1.1/")
g.namespace_manager.bind("dc", "http://purl.org/dc/terms/")
g.namespace_manager.bind("sioc", "http://rdfs.org/sioc/ns
g.namespace_manager.bind("tsioc", "http://rdfs.org/sioc/types
g.namespace_manager.bind("schema", "http://schema.org/")
g.namespace_manager.bind("part", "http://participa.br/")
self.g=g | Starts RDF graph and bing namespaces |
388,976 | def GetDirections(self, origin, destination, sensor = False, mode = None, waypoints = None, alternatives = None, avoid = None, language = None, units = None,
region = None, departure_time = None, arrival_time = None):
params = {
'origin': origin,  # request keys elided in the source dump; the Directions API parameter names mirror the argument names and are assumed below
'destination': destination,
'sensor': str(sensor).lower()
}
if mode:
params['mode'] = mode
if waypoints:
params['waypoints'] = waypoints
if alternatives:
params['alternatives'] = alternatives
if avoid:
params['avoid'] = avoid
if language:
params['language'] = language
if units:
params['units'] = units
if region:
params['region'] = region
if departure_time:
params['departure_time'] = departure_time
if arrival_time:
params['arrival_time'] = arrival_time
if not self.premier:
url = self.get_url(params)
else:
url = self.get_signed_url(params)
return self.GetService_url(url) | Get Directions Service
Pls refer to the Google Maps Web API for the details of the remained parameters |
388,977 | def ToLatLng(self):
rad_lat = math.atan2(self.z, math.sqrt(self.x * self.x + self.y * self.y))
rad_lng = math.atan2(self.y, self.x)
return (rad_lat * 180.0 / math.pi, rad_lng * 180.0 / math.pi) | Returns that latitude and longitude that this point represents
under a spherical Earth model. |
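The same formulas restated as a free function, with two easy sanity checks:
import math

def to_lat_lng(x, y, z):
    rad_lat = math.atan2(z, math.sqrt(x * x + y * y))
    rad_lng = math.atan2(y, x)
    return (rad_lat * 180.0 / math.pi, rad_lng * 180.0 / math.pi)

print(to_lat_lng(0, 0, 1))  # (90.0, 0.0), the north pole
print(to_lat_lng(1, 0, 0))  # (0.0, 0.0), equator at the prime meridian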
388,978 | def bind(self, prefix, namespace, *args, **kwargs):
setattr(self, prefix, RdfNamespace(prefix, namespace, **kwargs))
if kwargs.pop('calc', True):  # key elided in the source; 'calc' matches the kwarg documented in the docstring
self.__make_dicts__ | Extends the function to add an attribute to the class for each
added namespace to allow for use of dot notation. All prefixes are
converted to lowercase
Args:
prefix: string of namespace name
namespace: rdflib.namespace instance
kwargs:
calc: whether or not create the lookup reference dictionaries
Example usage:
RdfNsManager.rdf.type =>
http://www.w3.org/1999/02/22-rdf-syntax-ns#type |
388,979 | def xi2_from_mass1_mass2_spin2x_spin2y(mass1, mass2, spin2x, spin2y):
q = q_from_mass1_mass2(mass1, mass2)
a1 = 2 + 3 * q / 2
a2 = 2 + 3 / (2 * q)
return a1 / (q**2 * a2) * chi_perp_from_spinx_spiny(spin2x, spin2y) | Returns the effective precession spin argument for the smaller mass.
This function assumes it's given spins of the secondary mass. |
388,980 | def log(self, branch, remote):
log_hook = self.settings[]
if log_hook:
if ON_WINDOWS:
with NamedTemporaryFile(
prefix=, suffix=, delete=False
) as bat_file:
| Call a log-command, if set by git-up.fetch.all. |
388,981 | def _prefetch_items(self,change):
if self.is_initialized:
view = self.item_view
upper_limit = view.iterable_index+view.iterable_fetch_size-view.iterable_prefetch
lower_limit = max(0,view.iterable_index+view.iterable_prefetch)
offset = int(view.iterable_fetch_size/2.0)
upper_visible_row = view.visible_rect[2]
lower_visible_row = view.visible_rect[0]
print("Visible rect = %s"%view.visible_rect)
if upper_visible_row >= upper_limit:
next_index = max(0,upper_visible_row-offset)
if next_index>view.iterable_index:
print("Auto prefetch upper limit %s!"%upper_limit)
view.iterable_index = next_index
elif view.iterable_index>0 and lower_visible_row < lower_limit:
next_index = max(0,lower_visible_row-offset)
if next_index<view.iterable_index:
print("Auto prefetch lower limit=%s, iterable=%s, setting next=%s!"%(lower_limit,view.iterable_index,next_index))
view.iterable_index = next_index | When the current_row in the model changes (whether from scrolling) or
set by the application. Make sure the results are loaded! |
388,982 | def snap_to_nearest_config(x, tune_params):
params = []
for i, k in enumerate(tune_params.keys()):
values = numpy.array(tune_params[k])
idx = numpy.abs(values-x[i]).argmin()
params.append(int(values[idx]))
return params | helper func that for each param selects the closest actual value |
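A usage sketch with a made-up tuning space (parameter names and values are illustrative):
import numpy
tune_params = {"block_size_x": [16, 32, 64, 128], "tile_size": [1, 2, 4]}
print(snap_to_nearest_config([50, 3.2], tune_params))  # [64, 4]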
388,983 | def merge_parameters(parameters, date_time, macros, types_and_values):
merged_parameters = Query._airflow_macro_formats(date_time=date_time, macros=macros,
types_and_values=types_and_values)
if parameters:
if types_and_values:
parameters = {
item['name']: {'type': item['type'], 'value': item['value']}  # keys elided in the source; name/type/value assumed from the parameter-dict shape
for item in parameters
}
else:
parameters = {item['name']: item['value'] for item in parameters}  # keys elided in the source; name/value assumed
merged_parameters.update(parameters)
return merged_parameters | Merge Return a mapping from airflow macro names (prefixed with '_') to values
Args:
date_time: The timestamp at which the macro values need to be evaluated. This is only
applicable when types_and_values = True
macros: If true, the values in the returned dict are the macro strings (like '{{ ds }}')
Returns:
The resolved value, i.e. the value with the format modifiers replaced with the corresponding
parameter-values. E.g. if value is <project-id>.<dataset-id>.logs_%(_ds)s, the returned
value could be <project-id>.<dataset-id>.logs_2017-12-21. |
388,984 | def load_modules_alignak_configuration(self):
alignak_cfg = {}
for instance in self.modules_manager.instances:
if not hasattr(instance, 'get_alignak_configuration'):  # attribute name elided in the source; inferred from the call below and the docstring
return
try:
logger.info("Getting Alignak global configuration from module ", instance.name)
cfg = instance.get_alignak_configuration()
alignak_cfg.update(cfg)
except Exception as exp:
logger.error("Module %s get_alignak_configuration raised an exception %s. "
"Log and continue to run", instance.name, str(exp))
output = io.StringIO()
traceback.print_exc(file=output)
logger.error("Back trace of this remove: %s", output.getvalue())
output.close()
continue
params = []
if alignak_cfg:
logger.info("Got Alignak global configuration:")
for key, value in sorted(alignak_cfg.items()):
logger.info("- %s = %s", key, value)
if key.startswith():
key = + key[1:].upper() +
if value is None:
continue
if value == :
continue
if value == :
continue
params.append("%s=%s" % (key, value))
self.conf.load_params(params) | Load Alignak configuration from the arbiter modules
If module implements get_alignak_configuration, call this function
:param raw_objects: raw objects we got from reading config files
:type raw_objects: dict
:return: None |
388,985 | def surface_of_section(orbit, plane_ix, interpolate=False):
w = orbit.w()
if w.ndim == 2:
w = w[...,None]
ndim,ntimes,norbits = w.shape
H_dim = ndim // 2
p_ix = plane_ix + H_dim
if interpolate:
raise NotImplementedError("Not yet implemented, sorry!")
all_sos = np.zeros((ndim,norbits), dtype=object)
for n in range(norbits):
cross_ix = argrelmin(w[plane_ix,:,n]**2)[0]
cross_ix = cross_ix[w[p_ix,cross_ix,n] > 0.]
sos = w[:,cross_ix,n]
for j in range(ndim):
all_sos[j,n] = sos[j,:]
return all_sos | Generate and return a surface of section from the given orbit.
.. warning::
This is an experimental function and the API may change.
Parameters
----------
orbit : `~gala.dynamics.Orbit`
plane_ix : int
Integer that represents the coordinate to record crossings in. For
example, for a 2D Hamiltonian where you want to make a SoS in
:math:`y-p_y`, you would specify ``plane_ix=0`` (crossing the
:math:`x` axis), and this will only record crossings for which
:math:`p_x>0`.
interpolate : bool (optional)
Whether or not to interpolate on to the plane of interest. This
makes it much slower, but will work for orbits with a coarser
sampling.
Returns
-------
all_sos : :class:`numpy.ndarray`
Object array of shape ``(ndim, norbits)`` with, for each phase-space dimension
and each orbit, the values recorded at the plane crossings.
Examples
--------
If your orbit of interest is a tube orbit, it probably conserves (at
least approximately) some equivalent to angular momentum in the direction
of the circulation axis. Therefore, a surface of section in R-z should
be instructive for classifying these orbits. TODO...show how to convert
an orbit to Cylindrical..etc... |
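A minimal, hedged sketch of a call (it assumes an already-integrated gala Orbit named `orbit`; the index choices are illustrative for a 2D Hamiltonian):

# assume `orbit` came from something like Hamiltonian.integrate_orbit(...)
sos = surface_of_section(orbit, plane_ix=0)   # crossings of x = 0 with p_x > 0

# sos has shape (ndim, norbits); for a single 2D orbit, inspect y vs p_y at the crossings
y_cross, py_cross = sos[1, 0], sos[3, 0]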
388,986 | def getinfo(self, member):
if isinstance(member, RarInfo):
fname = member.filename
else:
fname = member
if PATH_SEP == '/':
fname2 = fname.replace("\\", "/")
else:
fname2 = fname.replace("/", "\\")
try:
return self._info_map[fname]
except KeyError:
try:
return self._info_map[fname2]
except KeyError:
raise NoRarEntry("No such file: %s" % fname) | Return RarInfo for filename |
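A hedged usage sketch (archive and member names are placeholders):

rf = RarFile('backup.rar')
info = rf.getinfo('docs/readme.txt')   # either path-separator style is accepted
print(info.file_size, info.date_time)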
388,987 | def vlan_classifier_rule_class_type_proto_proto_proto_val(self, **kwargs):
config = ET.Element("config")
vlan = ET.SubElement(config, "vlan", xmlns="urn:brocade.com:mgmt:brocade-vlan")
classifier = ET.SubElement(vlan, "classifier")
rule = ET.SubElement(classifier, "rule")
ruleid_key = ET.SubElement(rule, "ruleid")
ruleid_key.text = kwargs.pop('ruleid')
class_type = ET.SubElement(rule, "class-type")
proto = ET.SubElement(class_type, "proto")
proto = ET.SubElement(proto, "proto")
proto_val = ET.SubElement(proto, "proto-val")
proto_val.text = kwargs.pop('proto_val')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
388,988 | def on_nick(self, connection, event):
old_nickname = self.get_nickname(event)
old_color = self.nicknames.pop(old_nickname)
new_nickname = event.target()
message = "is now known as %s" % new_nickname
self.namespace.emit("message", old_nickname, message, old_color)
new_color = color(new_nickname)
self.nicknames[new_nickname] = new_color
self.emit_nicknames()
if self.nickname == old_nickname:
self.nickname = new_nickname | Someone changed their nickname - send the nicknames list to the
WebSocket. |
388,989 | def _check_available_data(archive, arc_type, day):
available_stations = []
if arc_type.lower() == 'day_vols':
# assumed day-volume layout of Yyyyy/Rjjj.01 directories under the archive root
wavefiles = glob.glob(os.path.join(archive, day.strftime('Y%Y'),
day.strftime('R%j.01'), '*'))
for wavefile in wavefiles:
header = read(wavefile, headonly=True)
available_stations.append((header[0].stats.station,
header[0].stats.channel))
elif arc_type.lower() == 'seishub':
client = SeishubClient(archive)
st = client.get_previews(starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + 86400)
for tr in st:
available_stations.append((tr.stats.station, tr.stats.channel))
elif arc_type.lower() == 'fdsn':
client = FDSNClient(archive)
inventory = client.get_stations(starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + 86400,
level='channel')
for network in inventory:
for station in network:
for channel in station:
available_stations.append((station.code,
channel.code))
return available_stations | Function to check what stations are available in the archive for a given \
day.
:type archive: str
:param archive: The archive source
:type arc_type: str
:param arc_type: The type of archive, can be: 'day_vols', 'seishub' or 'fdsn'
:type day: datetime.date
:param day: Date to retrieve data for
:returns: list of tuples of (station, channel) as available.
.. note:: Currently the seishub options are untested. |
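A hedged usage sketch (the FDSN service name and date are placeholders):

import datetime

stachans = _check_available_data('GEONET', 'fdsn', datetime.date(2016, 1, 1))
for station, channel in stachans:
    print(station, channel)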
388,990 | def render_file(self, filename):
dirname, basename = split(filename)
with changedir(dirname):
infile = abspath(basename)
outfile = abspath('%s.html' % basename)  # output name pattern assumed from the docstring
self.docutils.publish_file(infile, outfile, self.styles)
return outfile | Convert a reST file to HTML. |
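A hedged usage sketch (`renderer` stands for an instance of the class defining this method; the path is illustrative):

html_path = renderer.render_file('/docs/intro.rst')
print(html_path)   # with the reconstructed '%s.html' pattern: /docs/intro.rst.html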
388,991 | def remove_config_to_machine_group(self, project_name, config_name, group_name):
headers = {}
params = {}
resource = "/machinegroups/" + group_name + "/configs/" + config_name
(resp, header) = self._send("DELETE", project_name, None, resource, params, headers)
return RemoveConfigToMachineGroupResponse(header, resp) | remove a logtail config from a machine group
Unsuccessful operation will cause a LogException.
:type project_name: string
:param project_name: the Project name
:type config_name: string
:param config_name: the logtail config name to remove
:type group_name: string
:param group_name: the machine group name
:return: RemoveConfigToMachineGroupResponse
:raise: LogException |
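A hedged usage sketch (`client` is assumed to be an already-constructed aliyun LogClient; the names are placeholders):

res = client.remove_config_to_machine_group('my-project', 'nginx-logtail', 'web-servers')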
388,992 | def build_values(name, values_mods):
values_file = os.path.join(name, 'values.yaml')
with open(values_file) as f:
values = yaml.load(f)
for key, value in values_mods.items():
parts = key.split('.')  # keys are dotted paths into the nested values.yaml structure
mod_obj = values
for p in parts:
mod_obj = mod_obj[p]
print(f"Updating {values_file}: {key}: {value}")
if isinstance(mod_obj, MutableMapping):
keys = IMAGE_REPOSITORY_KEYS & mod_obj.keys()
if keys:
for key in keys:
mod_obj[key] = value['repository']
else:
possible_keys = ' or '.join(IMAGE_REPOSITORY_KEYS)
raise KeyError(
f"Could not find {possible_keys} in {values_file}:{key}"
)
mod_obj['tag'] = value['tag']
else:
raise TypeError(
f"The value at {key} in {values_file} must be a mapping, not {type(mod_obj)}"
)
with open(values_file, 'w') as f:
yaml.dump(values, f) | Update name/values.yaml with modifications |
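A hedged usage sketch (chart name, key path, and image reference are illustrative):

values_mods = {
    # dotted path into values.yaml; the value carries the new image repository and tag
    'singleuser.image': {'repository': 'example.io/binder-image', 'tag': '0.1.0'},
}
build_values('mychart', values_mods)   # rewrites mychart/values.yaml in place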
388,993 | def create_in_cluster(self):
try:
self.api.create_namespaced_service(self.namespace, self.body)
except ApiException as e:
raise ConuException(
"Exception when calling Kubernetes API - create_namespaced_service: {}\n".format(e))
logger.info(
"Creating Service %s in namespace: %s", self.name, self.namespace) | call Kubernetes API and create this Service in cluster,
raise ConuException if the API call fails
:return: None |
388,994 | def intercept_(self):
if getattr(self, 'booster', None) is not None and self.booster != 'gblinear':
raise AttributeError(
'Intercept (bias) is not defined for Booster type {}'.format(self.booster))
b = self.get_booster()
return np.array(json.loads(b.get_dump(dump_format='json')[0])['bias'])
.. note:: Intercept is defined only for linear learners
Intercept (bias) is only defined when the linear model is chosen as base
learner (`booster=gblinear`). It is not defined for other base learner types, such
as tree learners (`booster=gbtree`).
Returns
-------
intercept_ : array of shape ``(1,)`` or ``[n_classes]`` |
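A hedged usage sketch (random data for illustration only):

import numpy as np
import xgboost as xgb

X, y = np.random.rand(100, 5), np.random.rand(100)
model = xgb.XGBRegressor(booster='gblinear').fit(X, y)
print(model.intercept_)   # bias term of the linear booster

# a tree booster has no bias term, so accessing intercept_ raises AttributeError
# xgb.XGBRegressor(booster='gbtree').fit(X, y).intercept_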
388,995 | def delete_vmss(access_token, subscription_id, resource_group, vmss_name):
# path segments follow the standard ARM REST URL for virtual machine scale sets
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'?api-version=', COMP_API])
return do_delete(endpoint, access_token) | Delete a virtual machine scale set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
Returns:
HTTP response. |
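A hedged usage sketch (identifiers are placeholders; obtaining the token is assumed to happen elsewhere):

response = delete_vmss(access_token, '00000000-0000-0000-0000-000000000000',
                       'my-resource-group', 'my-vmss')
print(response.status_code)   # the delete is accepted and completes asynchronously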
388,996 | def csoftmax_for_slice(input):
[ten, u] = input
shape_t = ten.shape
shape_u = u.shape
ten -= tf.reduce_mean(ten)
q = tf.exp(ten)
active = tf.ones_like(u, dtype=tf.int32)
mass = tf.constant(0, dtype=tf.float32)
found = tf.constant(True, dtype=tf.bool)
def loop(q_, mask, mass_, found_):
q_list = tf.dynamic_partition(q_, mask, 2)
condition_indices = tf.dynamic_partition(tf.range(tf.shape(q_)[0]), mask, 2)
p = q_list[1] * (1.0 - mass_) / tf.reduce_sum(q_list[1])
p_new = tf.dynamic_stitch(condition_indices, [q_list[0], p])
less_mask = tf.cast(tf.less(u, p_new), tf.int32)
condition_indices = tf.dynamic_partition(tf.range(tf.shape(p_new)[0]), less_mask,
2)
split_p_new = tf.dynamic_partition(p_new, less_mask, 2)
split_u = tf.dynamic_partition(u, less_mask, 2)
alpha = tf.dynamic_stitch(condition_indices, [split_p_new[0], split_u[1]])
mass_ += tf.reduce_sum(split_u[1])
mask = mask * (tf.ones_like(less_mask) - less_mask)
found_ = tf.cond(tf.equal(tf.reduce_sum(less_mask), 0),
lambda: False,
lambda: True)
alpha = tf.reshape(alpha, q_.shape)
return alpha, mask, mass_, found_
(csoft, mask_, _, _) = tf.while_loop(cond=lambda _0, _1, _2, f: f,
body=loop,
loop_vars=(q, active, mass, found))
return [csoft, mask_] | An implementation of the constrained softmax (csoftmax) for a single slice.
Based on the paper:
https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers" (page 4)
Args:
input: A list of [input tensor, cumulative attention].
Returns:
output: A list of [csoftmax results, masks] |
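A graph-mode (TF1-style) sketch with illustrative numbers; the scores and per-position bounds are made up:

import tensorflow as tf

scores = tf.constant([1.0, 2.0, 0.5], dtype=tf.float32)   # attention logits for one slice
bounds = tf.constant([0.6, 0.6, 0.6], dtype=tf.float32)   # cumulative-attention upper bounds

csoft, mask = csoftmax_for_slice([scores, bounds])

with tf.Session() as sess:
    # a distribution that sums to 1 with every entry capped at its bound
    print(sess.run(csoft))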
388,997 | def classify(self, n_jobs=-1, configure=None):
start = timeit.default_timer()
networks = self.networks
n = len(networks)
cpu = n_jobs if n_jobs > -1 else mp.cpu_count()
if cpu > 1:
lpart = int(np.ceil(n / float(cpu))) if n > cpu else 1
parts = networks.split(np.arange(lpart, n, lpart))
behaviors_parts = Parallel(n_jobs=n_jobs)(delayed(__learn_io__)(part, self.setup, configure) for part in parts)
networks = core.LogicalNetworkList.from_hypergraph(networks.hg)
for behavior in behaviors_parts:
networks = networks.concat(behavior)
behaviors = __learn_io__(networks, self.setup, configure)
self.stats['time_io'] = timeit.default_timer() - start
self._logger.info("%s input-output logical behaviors found in %.4fs", len(behaviors), self.stats['time_io'])
return behaviors | Returns input-output behaviors for the list of logical networks in the attribute :attr:`networks`
Example::
>>> from caspo import core, classify
>>> networks = core.LogicalNetworkList.from_csv('networks.csv')
>>> setup = core.Setup.from_json('setup.json')
>>> classifier = classify.Classifier(networks, setup)
>>> behaviors = classifier.classify()
>>> behaviors.to_csv('behaviors.csv', networks=True)
Parameters
----------
n_jobs : int
Number of jobs to run in parallel. Default to -1 (all cores available)
configure : callable
Callable object responsible of setting clingo configuration
Returns
-------
caspo.core.logicalnetwork.LogicalNetworkList
The list of networks with one representative for each behavior |
388,998 | def highlight(self, *args):
toEnable = (self._highlighter is None)
seconds = 3
color = "red"
if len(args) > 3:
raise TypeError("Unrecognized argument(s) for highlight()")
for arg in args:
if type(arg) == bool:
toEnable = arg
elif isinstance(arg, Number):
seconds = arg
elif isinstance(arg, basestring):
color = arg
if self._highlighter is not None:
self._highlighter.close()
if toEnable:
self._highlighter = PlatformManager.highlight((self.getX(), self.getY(), self.getW(), self.getH()), color, seconds) | Highlights the region with a colored frame. Accepts the following parameters:
highlight([toEnable], [seconds], [color])
* toEnable (boolean): Enables or disables the overlay
* seconds (number): Seconds to show overlay
* color (string): Hex code ("#XXXXXX") or color name ("black") |
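A hedged usage sketch (`region` stands for an existing Region object):

region.highlight(2, "blue")   # show a blue frame around the region for 2 seconds
region.highlight(False)       # explicitly remove any active highlight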
388,999 | def run(self):
if KSER_METRICS_ENABLED == "yes":
from prometheus_client import start_http_server
logger.info("Metric.Starting...")
start_http_server(
os.getenv("KSER_METRICS_PORT", 8888),
os.getenv("KSER_METRICS_ADDRESS", "0.0.0.0")
)
logger.info("{}.Starting...".format(self.__class__.__name__))
running = True
while running:
msg = self.client.poll()
if msg:
if not msg.error():
self.REGISTRY.run(msg.value().decode())
elif msg.error().code() != KafkaError._PARTITION_EOF:
logger.error(msg.error())
running = False
self.client.close() | Run consumer |