language (stringclasses, 2 values) | func_code_string (stringlengths, 63–466k)
---|---|
java | public boolean waitForRegistered(long timeout, TimeUnit unit) {
try {
if (this.registeredLatch.await(timeout, unit)) {
return true;
}
} catch (InterruptedException e) {
LOG.severe("Interrupted while waiting for the Mesos framework to register");
Thread.currentThread().interrupt(); // restore the interrupt status before returning
return false;
}
return false;
} |
python | def LoadData(self, data, custom_properties=None):
"""Loads new rows to the data table, clearing existing rows.
May also set the custom_properties for the added rows. The given custom
properties dictionary specifies the dictionary that will be used for *all*
given rows.
Args:
data: The rows that the table will contain.
custom_properties: A dictionary of string to string to set as the custom
properties for all rows.
"""
self.__data = []
self.AppendData(data, custom_properties) |
python | def propagate_occur(self, node, value):
"""Propagate occurence `value` to `node` and its ancestors.
Occurence values are defined and explained in the SchemaNode
class.
"""
while node.occur < value:
node.occur = value
if node.name == "define":
break
node = node.parent |
java | protected final void write(PrintWriter printWriter, A a, Alphabet<I> inputs) {
writeState(printWriter);
writeEdge(printWriter);
writeETF(printWriter, a, inputs);
printWriter.close();
} |
python | def projector_generator(ket, bra):
"""
Generate a Pauli Sum that corresponds to the projection operator |ket><bra|
note: ket and bra are numerically ordered such that ket = [msd, ..., lsd]
where msd is the most significant digit and lsd is the least significant digit.
:param List ket: string of zeros and ones corresponding to a computational
basis state.
:param List bra: string of zeros and ones corresponding to a computational
basis state.
:return: projector as a pauli sum
:rtype: PauliSum
"""
projectors = []
for index, (ket_one_qubit, bra_one_qubit) in enumerate(zip(ket[::-1], bra[::-1])):
projectors.append(_single_projector_generator(ket_one_qubit,
bra_one_qubit, index))
return reduce(lambda x, y: x * y, projectors) |
python | def remove_all_lambda_permissions(app_name='', env='', region='us-east-1'):
"""Remove all foremast-* permissions from lambda.
Args:
app_name (str): Application name
env (str): AWS environment
region (str): AWS region
"""
session = boto3.Session(profile_name=env, region_name=region)
lambda_client = session.client('lambda')
legacy_prefix = app_name + "_"
lambda_arn = get_lambda_arn(app_name, env, region)
lambda_alias_arn = get_lambda_alias_arn(app_name, env, region)
arns = (lambda_arn, lambda_alias_arn)
for arn in arns:
try:
response = lambda_client.get_policy(FunctionName=arn)
except boto3.exceptions.botocore.exceptions.ClientError as error:
LOG.info("No policy exists for function %s, skipping deletion", arn)
LOG.debug(error)
continue
policy_json = json.loads(response['Policy'])
LOG.debug("Found Policy: %s", response)
for perm in policy_json['Statement']:
if perm['Sid'].startswith(FOREMAST_PREFIX) or perm['Sid'].startswith(legacy_prefix):
lambda_client.remove_permission(FunctionName=arn, StatementId=perm['Sid'])
LOG.info('removed permission: %s', perm['Sid'])
else:
LOG.info('Skipping deleting permission %s - Not managed by Foremast', perm['Sid']) |
java | public CreateElasticsearchDomainRequest withAdvancedOptions(java.util.Map<String, String> advancedOptions) {
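// Fluent setter: stores the advanced options map and returns this request for call chaining.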
setAdvancedOptions(advancedOptions);
return this;
} |
java | public CharSequence matchesPattern(final CharSequence input, final String pattern) {
if (!Pattern.matches(pattern, input)) {
fail(String.format(DEFAULT_MATCHES_PATTERN_EX, input, pattern));
}
return input;
} |
python | def _update_fitness(self, action_set):
"""Update the fitness values of the rules belonging to this action
set."""
# Compute the accuracy of each rule. Accuracy is inversely
# proportional to error. Below a certain error threshold, accuracy
# becomes constant. Accuracy values range over (0, 1].
total_accuracy = 0
accuracies = {}
for rule in action_set:
if rule.error < self.error_threshold:
accuracy = 1
else:
accuracy = (
self.accuracy_coefficient *
(rule.error / self.error_threshold) **
-self.accuracy_power
)
accuracies[rule] = accuracy
total_accuracy += accuracy * rule.numerosity
# On rare occasions we have zero total accuracy. This avoids a div
# by zero
total_accuracy = total_accuracy or 1
# Use the relative accuracies of the rules to update their fitness
for rule in action_set:
accuracy = accuracies[rule]
rule.fitness += (
self.learning_rate *
(accuracy * rule.numerosity / total_accuracy -
rule.fitness)
) |
java | protected void createVersionHistory(ImportNodeData nodeData) throws RepositoryException
{
// Generate new VersionHistoryIdentifier and BaseVersionIdentifier
// if uuid changed after UC
boolean newVersionHistory = nodeData.isNewIdentifer() || !nodeData.isContainsVersionhistory();
if (newVersionHistory)
{
nodeData.setVersionHistoryIdentifier(IdGenerator.generate());
nodeData.setBaseVersionIdentifier(IdGenerator.generate());
}
PlainChangesLogImpl changes = new PlainChangesLogImpl();
// use the VH helper so that, for a new VH, all changes are collected in the changes log
new VersionHistoryDataHelper(nodeData, changes, dataConsumer, nodeTypeDataManager,
nodeData.getVersionHistoryIdentifier(), nodeData.getBaseVersionIdentifier());
if (!newVersionHistory)
{
for (ItemState state : changes.getAllStates())
{
if (!state.getData().getQPath().isDescendantOf(Constants.JCR_SYSTEM_PATH))
{
changesLog.add(state);
}
}
}
else
{
changesLog.addAll(changes.getAllStates());
}
} |
python | def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e) |
python | def _drop_labels_or_levels(self, keys, axis=0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
"""
if self.ndim > 2:
raise NotImplementedError(
"_drop_labels_or_levels is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [k for k in keys if not
self._is_label_or_level_reference(k, axis=axis)]
if invalid_keys:
raise ValueError(("The following keys are not valid labels or "
"levels for axis {axis}: {invalid_keys}")
.format(axis=axis,
invalid_keys=invalid_keys))
# Compute levels and labels to drop
levels_to_drop = [k for k in keys
if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys
if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped |
python | def _build(self, inputs, keep_prob=None, is_training=None,
test_local_stats=True):
"""Connects the AlexNet module into the graph.
The is_training flag only controls the batch norm settings; if `False`, it
does not disable dropout by overriding any input `keep_prob`. To avoid the
confusion this may cause, an error is thrown if `is_training=False` and
`keep_prob` would cause dropout to be applied.
Args:
inputs: A Tensor of size [batch_size, input_height, input_width,
input_channels], representing a batch of input images.
keep_prob: A scalar Tensor representing the dropout keep probability.
When `is_training=False` this must be None or 1 to give no dropout.
is_training: Boolean to indicate if we are currently training. Must be
specified if batch normalization or dropout is used.
test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
normalization should use local batch statistics at test time.
By default `True`.
Returns:
A Tensor of size [batch_size, output_size], where `output_size` depends
on the mode the network was constructed in.
Raises:
base.IncompatibleShapeError: If any of the input image dimensions
(input_height, input_width) are too small for the given network mode.
ValueError: If `keep_prob` is not None or 1 when `is_training=False`.
ValueError: If `is_training` is not explicitly specified when using
batch normalization.
"""
# Check input shape
if (self._use_batch_norm or keep_prob is not None) and is_training is None:
raise ValueError("Boolean is_training flag must be explicitly specified "
"when using batch normalization or dropout.")
input_shape = inputs.get_shape().as_list()
if input_shape[1] < self._min_size or input_shape[2] < self._min_size:
raise base.IncompatibleShapeError(
"Image shape too small: ({:d}, {:d}) < {:d}".format(
input_shape[1], input_shape[2], self._min_size))
net = inputs
# Check keep prob
if keep_prob is not None:
valid_inputs = tf.logical_or(is_training, tf.equal(keep_prob, 1.))
keep_prob_check = tf.assert_equal(
valid_inputs, True,
message="Input `keep_prob` must be None or 1 if `is_training=False`.")
with tf.control_dependencies([keep_prob_check]):
net = tf.identity(net)
for i, params in enumerate(self._conv_layers):
output_channels, conv_params, max_pooling = params
kernel_size, stride = conv_params
conv_mod = conv.Conv2D(
name="conv_{}".format(i),
output_channels=output_channels,
kernel_shape=kernel_size,
stride=stride,
padding=conv.VALID,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers)
if not self.is_connected:
self._conv_modules.append(conv_mod)
net = conv_mod(net)
if self._use_batch_norm:
bn = batch_norm.BatchNorm(**self._batch_norm_config)
net = bn(net, is_training, test_local_stats)
net = tf.nn.relu(net)
if max_pooling is not None:
pooling_kernel_size, pooling_stride = max_pooling
net = tf.nn.max_pool(
net,
ksize=[1, pooling_kernel_size, pooling_kernel_size, 1],
strides=[1, pooling_stride, pooling_stride, 1],
padding=conv.VALID)
net = basic.BatchFlatten(name="flatten")(net)
for i, output_size in enumerate(self._fc_layers):
linear_mod = basic.Linear(
name="fc_{}".format(i),
output_size=output_size,
initializers=self._initializers,
partitioners=self._partitioners)
if not self.is_connected:
self._linear_modules.append(linear_mod)
net = linear_mod(net)
if self._use_batch_norm and self._bn_on_fc_layers:
bn = batch_norm.BatchNorm(**self._batch_norm_config)
net = bn(net, is_training, test_local_stats)
net = tf.nn.relu(net)
if keep_prob is not None:
net = tf.nn.dropout(net, keep_prob=keep_prob)
return net |
python | def erase_line(method=EraseMethod.ALL, file=sys.stdout):
""" Erase a line, or part of a line. See `method` argument below.
Cursor position does not change.
Esc[<method>K
Arguments:
method : One of these possible values:
EraseMethod.END or 0:
Clear from cursor to the end of the line.
EraseMethod.START or 1:
Clear from cursor to the start of the line.
EraseMethod.ALL or 2:
Clear the entire line.
Default: EraseMethod.ALL (2)
"""
erase.line(method).write(file=file) |
python | def __generic_save_as(self):
"""Returns False if user has cancelled operation, otherwise True."""
page = self._get_page()
if not page.editor.f:
return True
if page.editor.f.filename:
d = page.editor.f.filename
else:
d = os.path.join(self.save_dir if self.save_dir is not None \
else self.load_dir if self.load_dir is not None \
else ".", page.editor.f.default_filename)
new_filename = QFileDialog.getSaveFileName(self, page.make_text_saveas(), d, page.wild)[0]
if new_filename:
self.save_dir, _ = os.path.split(str(new_filename))
page.editor.f.save_as(str(new_filename))
page.flag_changed = False
self._update_gui_text_tabs()
page.editor.update_gui_label_fn()
return True
return False |
python | def get_block(self, usage_id, for_parent=None):
"""
Create an XBlock instance in this runtime.
The `usage_id` is used to find the XBlock class and data.
"""
def_id = self.id_reader.get_definition_id(usage_id)
try:
block_type = self.id_reader.get_block_type(def_id)
except NoSuchDefinition:
raise NoSuchUsage(repr(usage_id))
keys = ScopeIds(self.user_id, block_type, def_id, usage_id)
block = self.construct_xblock(block_type, keys, for_parent=for_parent)
return block |
java | public String getString(String name, String otherwise) {
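// Delegates to the backing data object, with `otherwise` used as the fallback value.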
return data.getString(name, otherwise);
} |
python | def commit_handler(self, cmd):
"""Process a CommitCommand."""
# These pass through if they meet the filtering conditions
interesting_filecmds = self._filter_filecommands(cmd.iter_files)
if interesting_filecmds or not self.squash_empty_commits:
# If all we have is a single deleteall, skip this commit
if len(interesting_filecmds) == 1 and isinstance(
interesting_filecmds[0], commands.FileDeleteAllCommand):
pass
else:
# Remember just the interesting file commands
self.keep = True
cmd.file_iter = iter(interesting_filecmds)
# Record the referenced blobs
for fc in interesting_filecmds:
if isinstance(fc, commands.FileModifyCommand):
if (fc.dataref is not None and
not stat.S_ISDIR(fc.mode)):
self.referenced_blobs.append(fc.dataref)
# Update from and merges to refer to commits in the output
cmd.from_ = self._find_interesting_from(cmd.from_)
cmd.merges = self._find_interesting_merges(cmd.merges)
else:
self.squashed_commits.add(cmd.id)
# Keep track of the parents
if cmd.from_ and cmd.merges:
parents = [cmd.from_] + cmd.merges
elif cmd.from_:
parents = [cmd.from_]
else:
parents = None
if cmd.mark is not None:
self.parents[b':' + cmd.mark] = parents |
python | def setactive(self, scriptname):
"""Define the active script
See MANAGESIEVE specifications, section 2.8
If scriptname is empty, the current active script is disabled,
i.e. there will be no active script anymore.
:param scriptname: script's name
:rtype: boolean
"""
code, data = self.__send_command(
"SETACTIVE", [scriptname.encode("utf-8")])
if code == "OK":
return True
return False |
python | def add_request_ids_from_environment(logger, name, event_dict):
"""Custom processor adding request IDs to the log event, if available."""
if ENV_APIG_REQUEST_ID in os.environ:
event_dict['api_request_id'] = os.environ[ENV_APIG_REQUEST_ID]
if ENV_LAMBDA_REQUEST_ID in os.environ:
event_dict['lambda_request_id'] = os.environ[ENV_LAMBDA_REQUEST_ID]
return event_dict |
java | public void doNewRecord(boolean bDisplayOption)
{
super.doNewRecord(bDisplayOption);
try {
if (this.getOwner().isOpen()) // Don't do first time!
{
boolean bOldEnableState = this.isEnabledListener();
this.setEnabledListener(false); // Just in case AddNew decides to call this
this.getOwner().close();
if (this.getOwner().hasNext()) // records yet?
this.getOwner().next();
else
this.getOwner().addNew(); // Make a new one
this.setEnabledListener(bOldEnableState);
}
} catch (DBException ex) {
if (ex.getErrorCode() == DBConstants.FILE_NOT_FOUND)
if ((this.getOwner().getOpenMode() & DBConstants.OPEN_DONT_CREATE) == DBConstants.OPEN_DONT_CREATE)
return; // Special case - they didn't want the table created if not found
ex.printStackTrace(); // Never
}
} |
java | public Date getTime( String key, Date defaultValue )
throws MissingResourceException
{
try
{
return getTime( key );
}
catch( MissingResourceException mre )
{
return defaultValue;
}
} |
python | def portgroups_configured(name, dvs, portgroups):
'''
Configures portgroups on a DVS.
Creates/updates/removes portgroups in a provided DVS
dvs
Name of the DVS
portgroups
Portgroup dict representations (see module sysdocs)
'''
datacenter = _get_datacenter_name()
log.info('Running state %s on DVS \'%s\', datacenter \'%s\'',
name, dvs, datacenter)
changes_required = False
ret = {'name': name,
'changes': {},
'result': None,
'comment': None}
comments = []
changes = {}
changes_required = False
si = None
try:
# TODO: portgroups validation
si = __salt__['vsphere.get_service_instance_via_proxy']()
current_pgs = __salt__['vsphere.list_dvportgroups'](
dvs=dvs, service_instance=si)
expected_pg_names = []
for pg in portgroups:
pg_name = pg['name']
expected_pg_names.append(pg_name)
del pg['name']
log.info('Checking pg \'%s\'', pg_name)
filtered_current_pgs = \
[p for p in current_pgs if p.get('name') == pg_name]
if not filtered_current_pgs:
changes_required = True
if __opts__['test']:
comments.append('State {0} will create a new portgroup '
'\'{1}\' in DVS \'{2}\', datacenter '
'\'{3}\''.format(name, pg_name, dvs,
datacenter))
else:
__salt__['vsphere.create_dvportgroup'](
portgroup_dict=pg, portgroup_name=pg_name, dvs=dvs,
service_instance=si)
comments.append('Created a new portgroup \'{0}\' in DVS '
'\'{1}\', datacenter \'{2}\''
''.format(pg_name, dvs, datacenter))
log.info(comments[-1])
changes.update({pg_name: {'new': pg}})
else:
# Portgroup already exists. Checking the config
log.trace('Portgroup \'%s\' found in DVS \'%s\', datacenter '
'\'%s\'. Checking for any updates.',
pg_name, dvs, datacenter)
current_pg = filtered_current_pgs[0]
diff_dict = _get_diff_dict(current_pg, pg)
if diff_dict:
changes_required = True
if __opts__['test']:
changes_strings = \
_get_changes_from_diff_dict(diff_dict)
log.trace('changes_strings = %s', changes_strings)
comments.append(
'State {0} will update portgroup \'{1}\' in '
'DVS \'{2}\', datacenter \'{3}\':\n{4}'
''.format(name, pg_name, dvs, datacenter,
'\n'.join(['\t{0}'.format(c) for c in
changes_strings])))
else:
__salt__['vsphere.update_dvportgroup'](
portgroup_dict=pg, portgroup=pg_name, dvs=dvs,
service_instance=si)
comments.append('Updated portgroup \'{0}\' in DVS '
'\'{1}\', datacenter \'{2}\''
''.format(pg_name, dvs, datacenter))
log.info(comments[-1])
changes.update(
{pg_name: {'new':
_get_val2_dict_from_diff_dict(diff_dict),
'old':
_get_val1_dict_from_diff_dict(diff_dict)}})
# Add the uplink portgroup to the expected pg names
uplink_pg = __salt__['vsphere.list_uplink_dvportgroup'](
dvs=dvs, service_instance=si)
expected_pg_names.append(uplink_pg['name'])
# Remove any extra portgroups
for current_pg in current_pgs:
if current_pg['name'] not in expected_pg_names:
changes_required = True
if __opts__['test']:
comments.append('State {0} will remove '
'the portgroup \'{1}\' from DVS \'{2}\', '
'datacenter \'{3}\''
''.format(name, current_pg['name'], dvs,
datacenter))
else:
__salt__['vsphere.remove_dvportgroup'](
portgroup=current_pg['name'], dvs=dvs,
service_instance=si)
comments.append('Removed the portgroup \'{0}\' from DVS '
'\'{1}\', datacenter \'{2}\''
''.format(current_pg['name'], dvs,
datacenter))
log.info(comments[-1])
changes.update({current_pg['name']:
{'old': current_pg}})
__salt__['vsphere.disconnect'](si)
except salt.exceptions.CommandExecutionError as exc:
log.exception('Encountered error')
if si:
__salt__['vsphere.disconnect'](si)
if not __opts__['test']:
ret['result'] = False
ret.update({'comment': exc.strerror,
'result': False if not __opts__['test'] else None})
return ret
if not changes_required:
# We have no changes
ret.update({'comment': ('All portgroups in DVS \'{0}\', datacenter '
'\'{1}\' exist and are correctly configured. '
'Nothing to be done.'.format(dvs, datacenter)),
'result': True})
else:
ret.update({
'comment': '\n'.join(comments),
'changes': changes,
'result': None if __opts__['test'] else True,
})
return ret |
python | def is_domterm(cls):
"""
:return: whether we are inside DomTerm
:rtype: bool
"""
import os
if cls._is_domterm is not None:
return cls._is_domterm
if not os.environ.get("DOMTERM"):
cls._is_domterm = False
return False
cls._is_domterm = True
return True |
java | public static List<HistoryDTO> transformToDto(List<History> list) {
if (list == null) {
throw new WebApplicationException("Null entity object cannot be converted to Dto object.", Status.INTERNAL_SERVER_ERROR);
}
List<HistoryDTO> result = new ArrayList<HistoryDTO>();
for (History history : list) {
result.add(transformToDto(history));
}
return result;
} |
java | public void setBaselineDurationText(int baselineNumber, String value)
{
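// Resolves the baseline duration field for the given baseline number and stores the text value.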
set(selectField(TaskFieldLists.BASELINE_DURATIONS, baselineNumber), value);
} |
java | public static Configuration get() {
Configuration conf = new Configuration();
File cloudConfigPath;
if (isWindows()) {
cloudConfigPath = new File(getEnvironment().get("APPDATA"), "gcloud");
} else {
cloudConfigPath = new File(System.getProperty("user.home"), ".config/gcloud");
}
File credentialFilePath = new File(cloudConfigPath, "application_default_credentials.json");
if (!credentialFilePath.exists()) {
return conf;
}
try {
JsonFactory jsonFactory = Utils.getDefaultJsonFactory();
InputStream inputStream = new FileInputStream(credentialFilePath);
JsonObjectParser parser = new JsonObjectParser(jsonFactory);
GenericJson fileContents = parser.parseAndClose(
inputStream, Charsets.UTF_8, GenericJson.class);
String fileType = (String) fileContents.get("type");
if ("authorized_user".equals(fileType)) {
String clientId = (String) fileContents.get("client_id");
String clientSecret = (String) fileContents.get("client_secret");
if (clientId != null && clientSecret != null) {
LOG.debug("Using GCP user credential from '{}'", credentialFilePath);
conf.setIfUnset("fs.gs.impl", GoogleHadoopFileSystem.class.getName());
conf.setIfUnset("fs.AbstractFileSystem.gs.impl", GoogleHadoopFS.class.getName());
conf.setIfUnset(GoogleHadoopFileSystemBase.GCS_PROJECT_ID_KEY, defaultProject());
conf.setIfUnset(GoogleHadoopFileSystemBase.GCS_WORKING_DIRECTORY_KEY, "/hadoop");
conf.setIfUnset(
HadoopCredentialConfiguration.BASE_KEY_PREFIX +
HadoopCredentialConfiguration.ENABLE_SERVICE_ACCOUNTS_SUFFIX,
"false");
conf.setIfUnset(
HadoopCredentialConfiguration.BASE_KEY_PREFIX +
HadoopCredentialConfiguration.CLIENT_ID_SUFFIX,
clientId);
conf.setIfUnset(
HadoopCredentialConfiguration.BASE_KEY_PREFIX +
HadoopCredentialConfiguration.CLIENT_SECRET_SUFFIX,
clientSecret);
}
}
} catch (IOException e) {
LOG.warn("Failed to load GCP user credential from '{}'", credentialFilePath);
}
return conf;
} |
java | public static CronetChannelBuilder forAddress(String host, int port, CronetEngine cronetEngine) {
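// Require a non-null Cronet engine, then build a channel builder for the given host and port.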
Preconditions.checkNotNull(cronetEngine, "cronetEngine");
return new CronetChannelBuilder(host, port, cronetEngine);
} |
python | def dir_tails_target(self, rr_id) -> str:
"""
Return target directory for revocation registry and tails file production.
:param rr_id: revocation registry identifier
:return: tails target directory
"""
return join(self.dir_tails_top(rr_id), rev_reg_id2cred_def_id(rr_id)) |
java | public void renderIconImage(ImageTag.State state, TreeElement elem)
{
ArrayList al = _lists[TreeHtmlAttributeInfo.HTML_LOCATION_ICON];
assert(al != null);
if (al.size() == 0)
return;
int cnt = al.size();
for (int i = 0; i < cnt; i++) {
TreeHtmlAttributeInfo attr = (TreeHtmlAttributeInfo) al.get(i);
state.registerAttribute(AbstractHtmlState.ATTR_GENERAL, attr.getAttribute(), attr.getValue());
}
} |
python | def update_alias_verification(sender, instance, **kwargs):
"""
Flags a user's email as unverified if they change it.
Optionally sends a verification token to the new endpoint.
"""
if isinstance(instance, User):
if instance.id:
if api_settings.PASSWORDLESS_USER_MARK_EMAIL_VERIFIED is True:
"""
For marking email aliases as not verified when a user changes it.
"""
email_field = api_settings.PASSWORDLESS_USER_EMAIL_FIELD_NAME
email_verified_field = api_settings.PASSWORDLESS_USER_EMAIL_VERIFIED_FIELD_NAME
# Verify that this is an existing instance and not a new one.
try:
user_old = User.objects.get(id=instance.id) # Pre-save object
instance_email = getattr(instance, email_field) # Incoming Email
old_email = getattr(user_old, email_field) # Pre-save object email
if instance_email != old_email and instance_email != "" and instance_email is not None:
# Email changed, verification should be flagged
setattr(instance, email_verified_field, False)
if api_settings.PASSWORDLESS_AUTO_SEND_VERIFICATION_TOKEN is True:
email_subject = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_SUBJECT
email_plaintext = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_PLAINTEXT_MESSAGE
email_html = api_settings.PASSWORDLESS_EMAIL_VERIFICATION_TOKEN_HTML_TEMPLATE_NAME
message_payload = {'email_subject': email_subject,
'email_plaintext': email_plaintext,
'email_html': email_html}
success = TokenService.send_token(instance, 'email', **message_payload)
if success:
logger.info('drfpasswordless: Successfully sent email on updated address: %s'
% instance_email)
else:
logger.info('drfpasswordless: Failed to send email to updated address: %s'
% instance_email)
except User.DoesNotExist:
# User probably is just initially being created
setattr(instance, email_verified_field, True)
if api_settings.PASSWORDLESS_USER_MARK_MOBILE_VERIFIED is True:
"""
For marking mobile aliases as not verified when a user changes it.
"""
mobile_field = api_settings.PASSWORDLESS_USER_MOBILE_FIELD_NAME
mobile_verified_field = api_settings.PASSWORDLESS_USER_MOBILE_VERIFIED_FIELD_NAME
# Verify that this is an existing instance and not a new one.
try:
user_old = User.objects.get(id=instance.id) # Pre-save object
instance_mobile = getattr(instance, mobile_field) # Incoming mobile
old_mobile = getattr(user_old, mobile_field) # Pre-save object mobile
if instance_mobile != old_mobile and instance_mobile != "" and instance_mobile is not None:
# Mobile changed, verification should be flagged
setattr(instance, mobile_verified_field, False)
if api_settings.PASSWORDLESS_AUTO_SEND_VERIFICATION_TOKEN is True:
mobile_message = api_settings.PASSWORDLESS_MOBILE_MESSAGE
message_payload = {'mobile_message': mobile_message}
success = TokenService.send_token(instance, 'mobile', **message_payload)
if success:
logger.info('drfpasswordless: Successfully sent SMS on updated mobile: %s'
% instance_mobile)
else:
logger.info('drfpasswordless: Failed to send SMS to updated mobile: %s'
% instance_mobile)
except User.DoesNotExist:
# User probably is just initially being created
setattr(instance, mobile_verified_field, True) |
java | public Producer<CloseableReference<PooledByteBuffer>>
getLocalFileFetchEncodedImageProducerSequence() {
synchronized (this) {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection(
"ProducerSequenceFactory#getLocalFileFetchEncodedImageProducerSequence");
}
if (mLocalFileEncodedImageProducerSequence == null) {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection(
"ProducerSequenceFactory#getLocalFileFetchEncodedImageProducerSequence:init");
}
mLocalFileEncodedImageProducerSequence =
new RemoveImageTransformMetaDataProducer(
getBackgroundLocalFileFetchToEncodeMemorySequence());
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
return mLocalFileEncodedImageProducerSequence;
} |
python | def add(self, name: str, sig: Tuple, obj: object) -> None:
"""
Add a file to the cache
:param name: name of the object to be pickled
:param sig: signature for object
:param obj: object to pickle
"""
if self._cache_directory is not None:
if name in self._cache:
os.remove(os.path.join(self._cache_directory, self._cache[name].loc))
fname = os.path.join(self._cache_directory, str(uuid.uuid4()))
with open(fname, 'wb') as f:
pickle.dump(obj, f)
self._cache[name] = _PickleJar.CacheEntry(sig, fname)
self._update() |
java | @SuppressWarnings("unchecked")
public EList<String> getPunchList() {
return (EList<String>) eGet(Ifc2x3tc1Package.Literals.IFC_MOVE__PUNCH_LIST, true);
} |
java | public ServiceFuture<VirtualMachineExtensionsListResultInner> getExtensionsAsync(String resourceGroupName, String vmName, String expand, final ServiceCallback<VirtualMachineExtensionsListResultInner> serviceCallback) {
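// Wraps the async service response in a ServiceFuture and notifies the supplied callback.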
return ServiceFuture.fromResponse(getExtensionsWithServiceResponseAsync(resourceGroupName, vmName, expand), serviceCallback);
} |
python | def _generate_nodes(self, name, command, templates=None):
"""Generate the relevant Sphinx nodes.
Generates a section for the Tree datamodel. Formats a tree section
as a list-table directive.
Parameters:
name (str):
The name of the config to be documented, e.g. 'sdsswork'
command (object):
The loaded module
templates (bool):
If True, generate a section for the path templates
Returns:
A section docutil node
"""
# the source name
source_name = name
# Title
section = nodes.section(
'',
nodes.title(text=name),
ids=[nodes.make_id(name)],
names=[nodes.fully_normalize_name(name)])
# Summarize
result = statemachine.ViewList()
if templates:
lines = _format_templates(name, command, command.templates)
for line in lines:
result.append(line, source_name)
self.state.nested_parse(result, 0, section)
return [section] |
python | def enable(
self,
cmd="enable",
pattern="password",
re_flags=re.IGNORECASE,
default_username="manager",
):
"""Enter enable mode"""
if self.check_enable_mode():
return ""
output = self.send_command_timing(cmd)
if (
"username" in output.lower()
or "login name" in output.lower()
or "user name" in output.lower()
):
output += self.send_command_timing(default_username)
if "password" in output.lower():
output += self.send_command_timing(self.secret)
log.debug("{}".format(output))
self.clear_buffer()
return output |
java | public static Map<CurrencyPair, Fee> adaptDynamicTradingFees(
BitfinexTradingFeeResponse[] responses, List<CurrencyPair> currencyPairs) {
Map<CurrencyPair, Fee> result = new HashMap<>();
for (BitfinexTradingFeeResponse response : responses) {
BitfinexTradingFeeResponse.BitfinexTradingFeeResponseRow[] responseRows =
response.getTradingFees();
for (BitfinexTradingFeeResponse.BitfinexTradingFeeResponseRow responseRow : responseRows) {
Currency currency = Currency.getInstance(responseRow.getCurrency());
BigDecimal percentToFraction = BigDecimal.ONE.divide(BigDecimal.ONE.scaleByPowerOfTen(2));
Fee fee =
new Fee(
responseRow.getMakerFee().multiply(percentToFraction),
responseRow.getTakerFee().multiply(percentToFraction));
for (CurrencyPair pair : currencyPairs) {
// Fee to trade for a currency is the fee to trade currency pairs with this base.
// Fee is typically assessed in units of the counter currency.
if (pair.base.equals(currency)) {
if (result.put(pair, fee) != null) {
throw new IllegalStateException(
"Fee for currency pair " + pair + " is overspecified");
}
}
}
}
}
return result;
} |
python | def next_message(self):
"""Block until a message(request or notification) is available.
If any messages were previously enqueued, return the first in queue.
If not, run the event loop until one is received.
"""
msg = self._session.next_message()
if msg:
return walk(self._from_nvim, msg) |
java | public void ifPropertyValueEquals(String template, Properties attributes) throws XDocletException
{
String value = getPropertyValue(attributes.getProperty(ATTRIBUTE_LEVEL), attributes.getProperty(ATTRIBUTE_NAME));
String expected = attributes.getProperty(ATTRIBUTE_VALUE);
if (value == null)
{
value = attributes.getProperty(ATTRIBUTE_DEFAULT);
}
if (expected.equals(value))
{
generate(template);
}
} |
python | def toggle_object_status(self, objname):
"""
Toggle boolean-valued sensor status between ``True`` and ``False``.
"""
o = getattr(self.system, objname)
o.status = not o.status
self.system.flush()
return o.status |
java | protected final void runOnGlThread(final Runnable r) {
getGVRContext().runOnGlThread(new Runnable() {
public void run() {
FPSCounter.timeCheck("runOnGlThread <START>: " + r);
r.run();
FPSCounter.timeCheck("runOnGlThread <END>: " + r);
}
});
} |
java | @Override
public void mouseClicked(MouseEvent evt) {
if (evt.getClickCount() == 2) {
triggerMaximisation((Component) evt.getSource());
}
} |
python | def operates_on(self, qubits: Iterable[raw_types.Qid]) -> bool:
"""Determines if the moment has operations touching the given qubits.
Args:
qubits: The qubits that may or may not be touched by operations.
Returns:
Whether this moment has operations involving the qubits.
"""
return any(q in qubits for q in self.qubits) |
python | def update_file(filename, items):
'''Edits the given file in place, replacing any instances of {key} with the
appropriate value from the provided items dict. If the given filename ends
with ".xml" values will be quoted and escaped for XML.
'''
# TODO: Implement something in the templates to denote whether the value
# being replaced is an XML attribute or a value. Perhaps move to dynamic
# XML tree building rather than string replacement.
should_escape = filename.endswith('addon.xml')
with open(filename, 'r') as inp:
text = inp.read()
for key, val in items.items():
if should_escape:
val = saxutils.quoteattr(val)
text = text.replace('{%s}' % key, val)
output = text
with open(filename, 'w') as out:
out.write(output) |
java | public void setDatasource(Object datasource) throws PageException, ClassException, BundleException {
if (datasource == null) return;
data.rawDatasource = datasource;
data.datasource = toDatasource(pageContext, datasource);
} |
java | private File createAttachFile(int pid) throws IOException {
String fn = ".attach_pid" + pid;
String path = "/proc/" + pid + "/cwd/" + fn;
File f = new File(path);
try {
f.createNewFile();
} catch (IOException x) {
f = new File(tmpdir, fn);
f.createNewFile();
}
return f;
} |
java | public BinaryKey moveValue( BinaryKey key,
String source,
String destination ) throws BinaryStoreException {
final BinaryStore sourceStore;
if (source == null) {
sourceStore = findBinaryStoreContainingKey(key);
} else {
sourceStore = selectBinaryStore(source);
}
// could not find the source store, or the source store does not contain the key
if (sourceStore == null || !sourceStore.hasBinary(key)) {
throw new BinaryStoreException(JcrI18n.unableToFindBinaryValue.text(key, sourceStore));
}
BinaryStore destinationStore = selectBinaryStore(destination);
// source and destination are the same store, so the key is already in place
if (sourceStore.equals(destinationStore)) {
return key;
}
final BinaryValue binaryValue = storeValue(sourceStore.getInputStream(key), destination, false);
sourceStore.markAsUnused(java.util.Collections.singleton(key));
return binaryValue.getKey();
} |
python | def parse(query_string, unquote=True, normalized=False, encoding=DEFAULT_ENCODING):
'''
Main parse function
@param query_string:
@param unquote: unquote the HTML query string?
@param encoding: An optional encoding used to decode the keys and values. Defaults to utf-8, which the W3C declares as the default in its encoding algorithm.
@see http://www.w3.org/TR/html5/forms.html#application/x-www-form-urlencoded-encoding-algorithm
@param normalized: parse numeric keys in dicts into proper lists?
'''
mydict = {}
plist = []
if query_string == "":
return mydict
if type(query_string) == bytes:
query_string = query_string.decode()
for element in query_string.split("&"):
try:
if unquote:
(var, val) = element.split("=")
if sys.version_info[0] == 2:
var = var.encode('ascii')
val = val.encode('ascii')
var = urllib.unquote_plus(var)
val = urllib.unquote_plus(val)
else:
(var, val) = element.split("=")
except ValueError:
raise MalformedQueryStringError
if encoding:
var = var.decode(encoding)
val = val.decode(encoding)
plist.append(parser_helper(var, val))
for di in plist:
(k, v) = di.popitem()
tempdict = mydict
while k in tempdict and type(v) is dict:
tempdict = tempdict[k]
(k, v) = v.popitem()
if k in tempdict and type(tempdict[k]).__name__ == 'list':
tempdict[k].append(v)
elif k in tempdict:
tempdict[k] = [tempdict[k], v]
else:
tempdict[k] = v
if normalized == True:
return _normalize(mydict)
return mydict |
python | def resolve_object(self, object_arg_name, resolver):
"""
A helper decorator to resolve object instance from arguments (e.g. identity).
Example:
>>> @namespace.route('/<int:user_id>')
... class MyResource(Resource):
... @namespace.resolve_object(
... object_arg_name='user',
... resolver=lambda kwargs: User.query.get_or_404(kwargs.pop('user_id'))
... )
... def get(self, user):
... # user is a User instance here
"""
def decorator(func_or_class):
if isinstance(func_or_class, type):
# Handle Resource classes decoration
# pylint: disable=protected-access
func_or_class._apply_decorator_to_methods(decorator)
return func_or_class
@wraps(func_or_class)
def wrapper(*args, **kwargs):
kwargs[object_arg_name] = resolver(kwargs)
return func_or_class(*args, **kwargs)
return wrapper
return decorator |
java | public boolean compatible( PropertySet properties)
{
boolean isCompatible;
Iterator<IAssertion> assertions;
for( assertions = getAssertions(),
isCompatible = !assertions.hasNext();
!isCompatible
&& assertions.hasNext();
isCompatible = assertions.next().compatible( properties));
return isCompatible;
} |
java | public <N, S extends Session<L>> void discoverChildren(
Resource<L> parent,
ResourceType<L> childType,
Session<L> session,
EndpointService<L, S> service,
Consumer<Resource<L>> resourceConsumer) {
try {
L parentLocation = parent != null ? parent.getLocation() : null;
log.debugf("Discovering children of [%s] of type [%s]", parent, childType);
final L childQuery = session.getLocationResolver().absolutize(parentLocation, childType.getLocation());
Map<L, N> nativeResources = session.getDriver().fetchNodes(childQuery);
for (Map.Entry<L, N> entry : nativeResources.entrySet()) {
L location = entry.getKey(); // this is the unique DMR address for this resource
String resourceName = session.getLocationResolver().applyTemplate(childType.getResourceNameTemplate(),
location, session.getEndpoint().getName());
ID id = InventoryIdUtil.generateResourceId(
session.getFeedId(),
session.getEndpoint(),
location.toString());
Builder<L> builder = Resource.<L> builder()
.id(id)
.name(new Name(resourceName))
.location(location)
.type(childType);
if (parent != null) {
builder.parent(parent);
}
// get the configuration of the resource
discoverResourceConfiguration(id, childType, location, entry.getValue(), builder, session);
// populate the metrics based on the resource's type
addMetricInstances(id, childType, location, entry.getValue(), builder, session);
// build the resource now - we might need it to generate metric IDs
Resource<L> resource = builder.build();
// The resource is built (and measurement instances assigned to it) so we can generate family names/labels
for (MeasurementInstance<L, MetricType<L>> instance : resource.getMetrics()) {
instance.setMetricFamily(service.generateMetricFamily(instance));
instance.setMetricLabels(service.generateMetricLabels(instance));
}
log.debugf("Discovered resource [%s]", resource);
// tell our consumer about our new resource
if (resourceConsumer != null) {
resourceConsumer.accept(resource);
}
// recursively discover children of child types
Set<ResourceType<L>> childTypes = session.getResourceTypeManager()
.getChildren(childType);
for (ResourceType<L> nextLevelChildType : childTypes) {
discoverChildren(resource, nextLevelChildType, session, service, resourceConsumer);
}
}
} catch (Exception e) {
log.errorFailedToDiscoverResources(e, session.getEndpoint());
resourceConsumer.report(e);
}
} |
java | @Nullable
public static HourRange valueOf(@Nullable final String str) {
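// Null-safe factory: a null input yields null, otherwise the string is parsed by the HourRange constructor.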
if (str == null) {
return null;
}
return new HourRange(str);
} |
python | def _forward_backward(self, logprob):
"""Runs forward-backward algorithm on observation log probs
Given the log probability of each timepoint being generated by
each event, run the HMM forward-backward algorithm to find the
probability that each timepoint belongs to each event (based on the
transition priors in p_start, p_end, and P)
See https://en.wikipedia.org/wiki/Forward-backward_algorithm for
mathematical details
Parameters
----------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
Returns
-------
log_gamma : time by event ndarray
Log probability of each timepoint belonging to each event
ll : float
Log-likelihood of fit
"""
logprob = copy.copy(logprob)
t = logprob.shape[0]
logprob = np.hstack((logprob, float("-inf") * np.ones((t, 1))))
# Initialize variables
log_scale = np.zeros(t)
log_alpha = np.zeros((t, self.n_events + 1))
log_beta = np.zeros((t, self.n_events + 1))
# Set up transition matrix, with final sink state
self.p_start = np.zeros(self.n_events + 1)
self.p_end = np.zeros(self.n_events + 1)
self.P = np.zeros((self.n_events + 1, self.n_events + 1))
label_ind = np.unique(self.event_chains, return_inverse=True)[1]
n_chains = np.max(label_ind) + 1
# For each chain of events, link them together and then to sink state
for c in range(n_chains):
chain_ind = np.nonzero(label_ind == c)[0]
self.p_start[chain_ind[0]] = 1 / n_chains
self.p_end[chain_ind[-1]] = 1 / n_chains
p_trans = (len(chain_ind) - 1) / t
if p_trans >= 1:
raise ValueError('Too few timepoints')
for i in range(len(chain_ind)):
self.P[chain_ind[i], chain_ind[i]] = 1 - p_trans
if i < len(chain_ind) - 1:
self.P[chain_ind[i], chain_ind[i+1]] = p_trans
else:
self.P[chain_ind[i], -1] = p_trans
self.P[-1, -1] = 1
# Forward pass
for i in range(t):
if i == 0:
log_alpha[0, :] = self._log(self.p_start) + logprob[0, :]
else:
log_alpha[i, :] = self._log(np.exp(log_alpha[i - 1, :])
.dot(self.P)) + logprob[i, :]
log_scale[i] = np.logaddexp.reduce(log_alpha[i, :])
log_alpha[i] -= log_scale[i]
# Backward pass
log_beta[-1, :] = self._log(self.p_end) - log_scale[-1]
for i in reversed(range(t - 1)):
obs_weighted = log_beta[i + 1, :] + logprob[i + 1, :]
offset = np.max(obs_weighted)
log_beta[i, :] = offset + self._log(
np.exp(obs_weighted - offset).dot(self.P.T)) - log_scale[i]
# Combine and normalize
log_gamma = log_alpha + log_beta
log_gamma -= np.logaddexp.reduce(log_gamma, axis=1, keepdims=True)
ll = np.sum(log_scale[:(t - 1)]) + np.logaddexp.reduce(
log_alpha[-1, :] + log_scale[-1] + self._log(self.p_end))
log_gamma = log_gamma[:, :-1]
return log_gamma, ll |
java | public final static Reliability getReliabilityByName(String name)
throws NullPointerException, IllegalArgumentException {
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
SibTr.info(tc, "Name = " + name);
if (name == null) {
throw new NullPointerException();
}
/* Look for the name in the nameSet, and return the corresponding */
/* Reliability from the indexSet. */
for (int i = 0; i <= MAX_INDEX + 1; i++) {
if (name.equals(nameSet[i])) {
return indexSet[i];
}
}
/* If the name didn't match, throw IllegalArgumentException */
throw new IllegalArgumentException();
} |
python | def _get_doc_by_raw_offset(self, doc_id):
"""
Load the document from the XML file using byte-offset information.
XXX: this is not tested under Windows.
"""
bounds = self._get_meta()[str(doc_id)].bounds
return xml_utils.load_chunk(self.filename, bounds) |
python | def open01(x, limit=1.e-6):
"""Constrain numbers to (0,1) interval"""
try:
return np.array([min(max(y, limit), 1. - limit) for y in x])
except TypeError:
return min(max(x, limit), 1. - limit) |
java | @Override
public void setBasicAuthCredential(Subject basicAuthSubject, String realm, String username, @Sensitive String password) throws CredentialException {
CredentialProvider provider = basicAuthCredentialProvider.getService();
if (provider != null) {
Hashtable<String, Object> hashtable = new Hashtable<String, Object>();
hashtable.put(KEY_BASIC_AUTH_REALM, realm);
hashtable.put(KEY_BASIC_AUTH_USER, username);
hashtable.put(KEY_BASIC_AUTH_PASSWORD, new SerializableProtectedString(password.toCharArray()));
basicAuthSubject.getPrivateCredentials().add(hashtable);
provider.setCredential(basicAuthSubject);
}
} |
python | def get_machine_id(machine, cwd):
'''
returns the salt_id name of the Vagrant VM
:param machine: the Vagrant machine name
:param cwd: the path to Vagrantfile
:return: salt_id name
'''
name = __utils__['sdb.sdb_get'](_build_machine_uri(machine, cwd), __opts__)
return name |
java | public Optional<UUID> getId(final Object object) {
return Optional.ofNullable(objectToId.get(object));
} |
python | def from_content_type(cls, content_type):
"""
Build a serializer object from a MIME Content-Type string.
:param str content_type: The Content-Type string to parse.
:return: A new serializer instance.
:rtype: :py:class:`.Serializer`
"""
name = content_type
options = {}
if ';' in content_type:
name, options_str = content_type.split(';', 1)
for part in options_str.split(';'):
part = part.strip()
if '=' in part:
key, value = part.split('=')
else:
key, value = (part, None)
options[key] = value
# old style compatibility
if name.endswith('+zlib'):
options['compression'] = 'zlib'
name = name[:-5]
return cls(name, charset=options.get('charset', 'UTF-8'), compression=options.get('compression')) |
java | public List<Attribute> getAttributes(String domain, String beanName, String[] attributes) throws Exception {
checkClientConnected();
return getAttributes(ObjectNameUtil.makeObjectName(domain, beanName), attributes);
} |
java | @Override
public Set<String> mergePossibleUserAttributeNames(final Set<String> toModify, final Set<String> toConsider) {
toModify.addAll(toConsider);
return toModify;
} |
java | public void addDefaultProperty(String key, String value) {
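// Ignores empty keys; otherwise records the key/value pair as a default property.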
if (StringUtils.isEmpty(key)) {
return;
}
defaultProperties.put(key, value);
} |
python | def send(self, name, sender=None, **kwargs):
"""
Sends the signal. Return every function response\
that was hooked to hook-name as a list: [(func, response), ]
:param str name: The hook name
:param class sender: Optional sender __class__ to which\
registered callback should match (see :py:func:`.connect` method)
:return: Signal responses as a sequence of tuples (func, response)
:rtype: list
"""
try:
signal = self._registry[name]
except KeyError:
return []
return signal.send(sender=sender, **kwargs) |
python | def to_entity(entity_type, value, fields):
"""
Internal API: Returns an instance of an entity of type entity_type with the specified value and fields (stored in
dict). This is only used by the local transform runner as a helper function.
"""
e = entity_type(value)
for k, v in fields.items():
e.fields[k] = Field(k, v)
return e |
python | def _main(args, prog_name):
"""Process the command line arguments of this script and dispatch."""
# get options and arguments
ui = getUI(prog_name, args)
if ui.optionIsSet("test"):
# just run unit tests
unittest.main(argv=[sys.argv[0]])
elif ui.optionIsSet("help"):
# just show help
ui.usage()
else:
verbose = ui.optionIsSet("verbose")
# we know there are exactly two arguments, because we specified this in the
# UI description.
regions_fn = ui.getArgument(0)
to_count_fn = ui.getArgument(1)
# get output handle
out_fh = sys.stdout
if ui.optionIsSet("output"):
out_fh = open(ui.getValue("output"), "w")
# norm?
norm = ui.optionIsSet("normalize")
# get anchor point
anch = ANCHOR_START
if ui.optionIsSet("anchor"):
if ui.getValue("anchor") == "5-prime":
anch = ANCHOR_5PRIME
res = process_anchor_start(regions_fn, to_count_fn, anchor_to=anch,
normalize=norm, verbose=verbose)
write_output(res, out_fh, verbose=verbose) |
python | def GetParentFileEntry(self):
"""Retrieves the parent file entry.
Returns:
APFSContainerFileEntry: parent file entry or None if not available.
"""
volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(
self.path_spec)
if volume_index is None:
return None
return self._file_system.GetRootFileEntry() |
python | def team_profiles(self, team):
"""
Get team's social media profiles linked on their TBA page.
:param team: Team to get data on.
:return: List of Profile objects.
"""
return [Profile(raw) for raw in self._get('team/%s/social_media' % self.team_key(team))] |
java | private void retrieveNextPage() {
if (this.pageSize < Long.MAX_VALUE || this.itemLimit < Long.MAX_VALUE) {
this.request.query("limit", Math.min(this.pageSize, this.itemLimit - this.count));
} else {
this.request.query("limit", null);
}
ResultBodyCollection<T> page = null;
try {
page = this.getNext();
} catch (IOException exception) {
// See comments in hasNext().
this.ioException = exception;
}
if (page != null) {
this.continuation = this.getContinuation(page);
if (page.data != null && !page.data.isEmpty()) {
this.count += page.data.size();
this.nextData = page.data;
} else {
this.nextData = null;
}
} else {
this.continuation = null;
this.nextData = null;
}
} |
java | public static DateTime parseDate(String dateString) {
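// Normalizes the input string first, then parses it with the standard date format.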
dateString = normalize(dateString);
return parse(dateString, DatePattern.NORM_DATE_FORMAT);
} |
python | def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0] |
java | public void setXSIZE(Integer newXSIZE) {
Integer oldXSIZE = xsize;
xsize = newXSIZE;
if (eNotificationRequired())
eNotify(new ENotificationImpl(this, Notification.SET, AfplibPackage.IDD__XSIZE, oldXSIZE, xsize));
} |
java | @Beta
public final void parse(Object destination, CustomizeJsonParser customizeParser)
throws IOException {
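// Seeds the parsing context with the destination's runtime type before delegating to the context-aware parse.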
ArrayList<Type> context = new ArrayList<Type>();
context.add(destination.getClass());
parse(context, destination, customizeParser);
} |
python | def _get_event(receiver_id, event_id):
"""Find event and check access rights."""
event = Event.query.filter_by(
receiver_id=receiver_id, id=event_id
).first_or_404()
try:
user_id = request.oauth.access_token.user_id
except AttributeError:
user_id = current_user.get_id()
if event.user_id != int(user_id):
abort(401)
return event |
java | BlockCommand getInvalidateBlocks(int maxblocks) {
Block[] deleteList = null;
synchronized (invalidateBlocks) {
deleteList = invalidateBlocks.pollToArray(new Block[Math.min(
invalidateBlocks.size(), maxblocks)]);
}
return (deleteList == null || deleteList.length == 0) ?
null: new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, deleteList);
} |
python | def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
serv = _get_serv(ret=None)
sql = '''select first(id) as fid, first(full_ret) as fret
from returns
where fun = '{0}'
group by fun, id
'''.format(fun)
data = serv.query(sql)
ret = {}
if data:
points = data[0]['points']
for point in points:
ret[point[1]] = salt.utils.json.loads(point[2])
return ret |
java | public boolean get(String name, boolean defaultValue) throws IllegalArgumentException {
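// Finds the mandatory boolean slot for the name; returns defaultValue only if the slot is still defaulted.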
ObjectSlot slot = findMandatorySlot(name, boolean.class);
return slot.defaulted ? defaultValue : ((Boolean) slot.fieldValue).booleanValue();
} |
python | def parse_args(argString=None):
"""Parses the command line options and arguments.
:returns: A :py:class:`argparse.Namespace` object created by the
:py:mod:`argparse` module. It contains the values of the
different options.
====================== ====== ================================
Options Type Description
====================== ====== ================================
``--evec`` string The EVEC file from EIGENSOFT
``--scree-plot-title`` string The main title of the scree plot
``--out`` string The name of the output file
====================== ====== ================================
.. note::
No option check is done here (except for the one automatically done by
:py:mod:`argparse`). Those need to be done elsewhere (see
:py:func:`checkArgs`).
"""
args = None
if argString is None:
args = parser.parse_args()
else:
args = parser.parse_args(argString)
return args |
python | def detectSmartphone(self):
"""Return detection of a general smartphone device
Checks to see whether the device is *any* 'smartphone'.
Note: It's better to use DetectTierIphone() for modern touchscreen devices.
"""
return self.detectTierIphone() \
or self.detectS60OssBrowser() \
or self.detectSymbianOS() \
or self.detectWindowsMobile() \
or self.detectBlackBerry() \
or self.detectMeegoPhone() \
or self.detectPalmWebOS() |
java | @Override
public Future<U> link(U current, BasicProfile to) {
return toScala(doLink(current, to));
} |
python | def add_actor(self, uinput, reset_camera=False, name=None, loc=None,
culling=False):
"""
Adds an actor to the render window. Creates an actor if the input is
a mapper.
Parameters
----------
uinput : vtk.vtkMapper or vtk.vtkActor
vtk mapper or vtk actor to be added.
reset_camera : bool, optional
Resets the camera when true.
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``.
culling : bool optional
Does not render faces that should not be visible to the
plotter. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Default False.
Returns
-------
actor : vtk.vtkActor
The actor.
actor_properties : vtk.Properties
Actor properties.
"""
# Remove actor by that name if present
rv = self.remove_actor(name, reset_camera=False)
if isinstance(uinput, vtk.vtkMapper):
actor = vtk.vtkActor()
actor.SetMapper(uinput)
else:
actor = uinput
self.AddActor(actor)
actor.renderer = proxy(self)
if name is None:
name = str(hex(id(actor)))
self._actors[name] = actor
if reset_camera:
self.reset_camera()
elif not self.camera_set and reset_camera is None and not rv:
self.reset_camera()
else:
self.parent._render()
self.update_bounds_axes()
if culling:
try:
actor.GetProperty().BackfaceCullingOn()
except AttributeError: # pragma: no cover
pass
return actor, actor.GetProperty() |
python | def _set_box(self):
"""
Set the box size for the molecular assembly
"""
net_volume = 0.0
for idx, mol in enumerate(self.mols):
length = max([np.max(mol.cart_coords[:, i])-np.min(mol.cart_coords[:, i])
for i in range(3)]) + 2.0
net_volume += (length**3.0) * float(self.param_list[idx]['number'])
length = net_volume**(1.0/3.0)
for idx, mol in enumerate(self.mols):
self.param_list[idx]['inside box'] = '0.0 0.0 0.0 {} {} {}'.format(
length, length, length) |
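A worked example of the box sizing above, with illustrative numbers (two molecule types, padded bounding-cube edges 5.0 and 4.0, copy counts 10 and 20):
# Each padded bounding cube contributes edge**3 * count to the net volume
volume = 5.0**3 * 10 + 4.0**3 * 20  # 1250 + 1280 = 2530.0
box_edge = volume ** (1.0 / 3.0)  # ~13.63, used for all three box dimensions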
java | public java.lang.String getResource() {
java.lang.Object ref = resource_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
resource_ = s;
return s;
}
} |
java | public void fine(
String sourceClass,
String sourceMethod,
String msg
)
{
logp(Level.FINE, sourceClass, sourceMethod, msg);
} |
python | def set_reviewing(self, hit_id, revert=None):
"""
Update a HIT with a status of Reviewable to have a status of Reviewing,
or reverts a Reviewing HIT back to the Reviewable status.
Only HITs with a status of Reviewable can be updated with a status of
Reviewing. Similarly, only Reviewing HITs can be reverted back to a
status of Reviewable.
"""
params = {'HITId' : hit_id,}
if revert:
params['Revert'] = revert
return self._process_request('SetHITAsReviewing', params) |
python | def __set_workdir(self):
"""Set current script directory as working directory"""
fname = self.get_current_filename()
if fname is not None:
directory = osp.dirname(osp.abspath(fname))
self.open_dir.emit(directory) |
java | public static JournalManager createJournal(Configuration conf, URI uri,
NamespaceInfo nsInfo, NameNodeMetrics metrics) {
Class<? extends JournalManager> clazz = getJournalClass(conf,
uri.getScheme());
try {
Constructor<? extends JournalManager> cons = clazz.getConstructor(
Configuration.class, URI.class, NamespaceInfo.class,
NameNodeMetrics.class);
return cons.newInstance(conf, uri, nsInfo, metrics);
} catch (Exception e) {
throw new IllegalArgumentException("Unable to construct journal, " + uri,
e);
}
} |
python | def _generate_validator(self, field):
"""Emits validator if data type has associated validator."""
validator = self._determine_validator_type(field.data_type,
fmt_var(field.name),
field.has_default)
if field.has_default:
    value = '{} ?: {}'.format(fmt_var(field.name), fmt_default_value(field))
else:
    value = fmt_var(field.name)
if validator:
self.emit('{}({});'.format(validator, value)) |
java | private ChocoConstraint build(Constraint cstr) throws SchedulerException {
ChocoMapper mapper = params.getMapper();
ChocoConstraint cc = mapper.get(cstr);
if (cc == null) {
throw new SchedulerModelingException(origin, "No implementation mapped to '" + cstr.getClass().getSimpleName() + "'");
}
return cc;
} |
java | public static int cuStreamWriteValue32(CUstream stream, CUdeviceptr addr, int value, int flags)
{
return checkResult(cuStreamWriteValue32Native(stream, addr, value, flags));
} |
java | @Override
public T addAsManifestResource(URL resource, ArchivePath target) throws IllegalArgumentException {
Validate.notNull(resource, "Resource should be specified");
Validate.notNull(target, "Target should be specified");
File file = new File(resource.getFile());
if (file.exists()) {
return addAsManifestResource(file, target);
}
return addAsManifestResource(new UrlAsset(resource), target);
} |
python | def update(self, friendly_name=values.unset, identity=values.unset,
deployment_sid=values.unset, enabled=values.unset):
"""
Update the DeviceInstance
:param unicode friendly_name: A human readable description for this Device.
:param unicode identity: An identifier of the Device user.
:param unicode deployment_sid: The unique SID of the Deployment group.
:param bool enabled: The enabled
:returns: Updated DeviceInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'Identity': identity,
'DeploymentSid': deployment_sid,
'Enabled': enabled,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return DeviceInstance(
self._version,
payload,
fleet_sid=self._solution['fleet_sid'],
sid=self._solution['sid'],
) |
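A hedged usage sketch for the update call above (how `device` is obtained from the Twilio client is an assumption; only the keyword arguments come from the snippet):
# Hypothetical: rename a device and enable it
updated = device.update(friendly_name='Lobby sensor', enabled=True)
print(updated.friendly_name, updated.enabled)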
python | def _from_arrays(self, vertices, faces, deep=True, verts=False):
"""
Set polygons and points from numpy arrays
Parameters
----------
vertices : np.ndarray of dtype=np.float32 or np.float64
Vertex array. 3D points.
faces : np.ndarray of dtype=np.int64
Face index array. Faces can contain any number of points.
Examples
--------
>>> import numpy as np
>>> import vtki
>>> vertices = np.array([[0, 0, 0],
... [1, 0, 0],
... [1, 1, 0],
... [0, 1, 0],
... [0.5, 0.5, 1]])
>>> faces = np.hstack([[4, 0, 1, 2, 3],
... [3, 0, 1, 4],
... [3, 1, 2, 4]]) # one square and two triangles
>>> surf = vtki.PolyData(vertices, faces)
"""
if deep or verts:
vtkpoints = vtk.vtkPoints()
vtkpoints.SetData(numpy_to_vtk(vertices, deep=deep))
self.SetPoints(vtkpoints)
# Convert to a vtk array
vtkcells = vtk.vtkCellArray()
if faces.dtype != vtki.ID_TYPE:
faces = faces.astype(vtki.ID_TYPE)
# get number of faces
if faces.ndim == 1:
c = 0
nfaces = 0
while c < faces.size:
c += faces[c] + 1
nfaces += 1
else:
nfaces = faces.shape[0]
idarr = numpy_to_vtkIdTypeArray(faces.ravel(), deep=deep)
vtkcells.SetCells(nfaces, idarr)
if (faces.ndim > 1 and faces.shape[1] == 2) or verts:
self.SetVerts(vtkcells)
else:
self.SetPolys(vtkcells)
else:
self.points = vertices
self.faces = faces |
java | public JobRun withPredecessorRuns(Predecessor... predecessorRuns) {
if (this.predecessorRuns == null) {
setPredecessorRuns(new java.util.ArrayList<Predecessor>(predecessorRuns.length));
}
for (Predecessor ele : predecessorRuns) {
this.predecessorRuns.add(ele);
}
return this;
} |
java | public static final EnhancementsParser createDefaultParser(HttpResponse response) throws EnhancementParserException, IOException {
ParserConfig config = new ParserConfig();
// Prevent malformed datetime values
// TODO review - added to prevent errors when parsing invalid dates
config.set(BasicParserSettings.VERIFY_DATATYPE_VALUES, false);
// The REDLINK header may be absent, so guard against a null Header before reading its value
Header redlinkHeader = response.getFirstHeader(REDLINK);
String uri = redlinkHeader == null ? null : redlinkHeader.getValue();
if (uri == null || uri.isEmpty()) {
uri = "urn:uuid-" + UUID.randomUUID().toString();
}
Model result = new TreeModel();
//Prepare to read the response
// getCharset() may return null when the Content-Type carries no charset parameter
Charset contentCharset = ContentType.getOrDefault(response.getEntity()).getCharset();
String charsetStr = contentCharset == null ? null : contentCharset.displayName();
Charset charset;
if(charsetStr == null){
charset = UTF8;
} else {
try {
charset = Charset.forName(charsetStr);
}catch (IllegalCharsetNameException | UnsupportedCharsetException e){
log.warn("Unable to use charset '"+ charsetStr +"'. Will fallback to UTF-8", e);
charset = UTF8;
}
}
Reader reader = new InputStreamReader(response.getEntity().getContent(), charset);
try {
ValueFactory vf = new MemValueFactory();
RDFFormat format = RDFFormat.forMIMEType(ContentType.getOrDefault(response.getEntity()).getMimeType());
RDFParser parser = Rio.createParser(format, vf);
parser.setParserConfig(config);
parser.setRDFHandler(new ContextStatementCollector(result, vf));
parser.parse(reader, uri);
} catch (RDFHandlerException | RDFParseException e) {
throw new EnhancementParserException("Error Parsing Analysis results" ,e);
} catch (IOException e) {
throw new EnhancementParserException("Unable to read Analysis response" ,e);
} finally {
IOUtils.closeQuietly(reader);
}
return new RDFStructureParser(result);
} |
python | def add_flow(self, flow):
"""
Add an :class:`Flow` flow to the scheduler.
"""
if hasattr(self, "_flow"):
raise self.Error("Only one flow can be added to the scheduler.")
# Check if we are already using a scheduler to run this flow
flow.check_pid_file()
flow.set_spectator_mode(False)
# Build dirs and files (if not yet done)
flow.build()
with open(flow.pid_file, "wt") as fh:
fh.write(str(self.pid))
self._pid_file = flow.pid_file
self._flow = flow |
java | public BigMoney minusRetainScale(BigMoneyProvider moneyToSubtract, RoundingMode roundingMode) {
BigMoney toSubtract = checkCurrencyEqual(moneyToSubtract);
return minusRetainScale(toSubtract.getAmount(), roundingMode);
} |
java | @Override
public void append(MapTile other, int offsetX, int offsetY)
{
Check.notNull(other);
if (!map.isCreated())
{
map.create(other.getTileWidth(), other.getTileHeight(), 1, 1);
}
if (other.getTileWidth() != map.getTileWidth() || other.getTileHeight() != map.getTileHeight())
{
throw new LionEngineException(ERROR_APPEND_MAP_TILE_SIZE
+ other.getTileWidth()
+ Constant.SPACE
+ map.getTileHeight());
}
final int widthInTile = map.getInTileWidth();
final int heightInTile = map.getInTileHeight();
final int newWidth = Math.max(widthInTile, widthInTile - (widthInTile - offsetX) + other.getInTileWidth());
final int newHeight = Math.max(heightInTile, heightInTile - (heightInTile - offsetY) + other.getInTileHeight());
map.resize(newWidth, newHeight);
appendMap(other, offsetX, offsetY);
} |
python | def d(self,value):
""" set anisotropic scaling """
assert value.shape[0]==self._P*self._N, 'd dimension mismatch'
self._d = value
self.clear_cache('Yhat','Xhat','Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',
'LRLdiag_Xhat_tens','LRLdiag_Yhat','Areml_grad',
'beta_grad','Xstar_beta_grad','Zstar','DLZ') |