language | func_code_string
---|---
java | @SuppressWarnings({"unchecked", "unused"})
protected <T extends IGPSObject> List<T> parseObjectArray(final JSONArray array, final Class<T> type) throws ParseException {
try {
if (array == null) {
return new ArrayList<T>(10);
}
final List<T> objects = new ArrayList<T>(10);
for (int i = 0; i < array.length(); i++) {
objects.add((T) this.parse(array.getJSONObject(i)));
}
return objects;
} catch (final JSONException e) {
throw new ParseException("Parsing failed", e);
}
} |
python | def zip_dicts(left, right, prefix=()):
"""
Modified zip over two dictionaries.
Iterates through all keys of the left dictionary, yielding:
- the nested key path
- the value and parent dictionary for both sides
"""
for key, left_value in left.items():
path = prefix + (key, )
right_value = right.get(key)
if isinstance(left_value, dict):
yield from zip_dicts(left_value, right_value or {}, path)
else:
yield path, left, left_value, right, right_value |
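
A brief usage sketch of `zip_dicts`; the input dictionaries below are made up for illustration, and the function above is assumed to be in scope:

```python
left = {"a": 1, "b": {"c": 2}}
right = {"a": 10, "b": {"c": 20, "d": 30}}
for path, l_parent, l_val, r_parent, r_val in zip_dicts(left, right):
    print(path, l_val, r_val)
# ('a',) 1 10
# ('b', 'c') 2 20
```
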
java | public T messageType(String messageType) {
this.messageType = messageType;
getAction().setMessageType(messageType);
if (binaryMessageConstructionInterceptor.supportsMessageType(messageType)) {
getMessageContentBuilder().add(binaryMessageConstructionInterceptor);
}
if (gzipMessageConstructionInterceptor.supportsMessageType(messageType)) {
getMessageContentBuilder().add(gzipMessageConstructionInterceptor);
}
return self;
} |
java | private void copyLanguageNodes() {
CmsObject cms = getCms();
CmsMultiplexReport report = (CmsMultiplexReport)getReport();
CmsFile file;
CmsXmlContent content;
int totalFiles = m_copyresources.length;
int processedFiles = 0;
Locale sourceLocale = CmsLocaleManager.getLocale(m_sourceLanguage);
Locale targetLocale = CmsLocaleManager.getLocale(m_targetLanguage);
for (int i = 0; i < m_copyresources.length; i++) {
processedFiles++;
report.print(
org.opencms.report.Messages.get().container(
org.opencms.report.Messages.RPT_SUCCESSION_2,
new Object[] {String.valueOf(processedFiles), String.valueOf(totalFiles)}),
I_CmsReport.FORMAT_NOTE);
report.print(
Messages.get().container(Messages.RPT_LOCALIZATION_BYPASS_1, new Object[] {m_copyresources[i]}));
report.print(org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_DOTS_0));
try {
file = cms.readFile(m_copyresources[i]);
content = CmsXmlContentFactory.unmarshal(cms, file);
if (!content.hasLocale(sourceLocale)) {
report.println(
Messages.get().container(
Messages.GUI_REPORT_LANGUAGEC0PY_WARN_SOURCELOCALE_MISSING_1,
new Object[] {sourceLocale}),
I_CmsReport.FORMAT_WARNING);
CmsMessageContainer container = Messages.get().container(
Messages.GUI_REPORT_LANGUAGEC0PY_WARN_SOURCELOCALE_MISSING_2,
new Object[] {m_copyresources[i], sourceLocale});
report.addWarning(container);
} else if (content.hasLocale(targetLocale)) {
report.println(
Messages.get().container(
Messages.GUI_REPORT_LANGUAGEC0PY_WARN_TARGETLOCALE_EXISTS_1,
new Object[] {targetLocale}),
I_CmsReport.FORMAT_WARNING);
CmsMessageContainer container = Messages.get().container(
Messages.GUI_REPORT_LANGUAGEC0PY_WARN_TARGETLOCALE_EXISTS_2,
new Object[] {m_copyresources[i], targetLocale});
report.addWarning(container);
} else {
content.copyLocale(sourceLocale, targetLocale);
if (m_delete) {
content.removeLocale(sourceLocale);
}
file.setContents(content.marshal());
CmsLock lock = cms.getLock(file);
if (lock.isInherited()) {
unlockInherited(file.getRootPath());
cms.lockResource(m_copyresources[i]);
} else {
if (lock.isNullLock()) {
cms.lockResource(m_copyresources[i]);
} else {
if (!lock.isLockableBy(cms.getRequestContext().getCurrentUser())) {
cms.changeLock(m_copyresources[i]);
}
}
}
cms.writeFile(file);
cms.unlockResource(m_copyresources[i]);
report.println(
org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_OK_0),
I_CmsReport.FORMAT_OK);
}
} catch (Throwable f) {
CmsMessageContainer error = Messages.get().container(
Messages.GUI_REPORT_LANGUAGEC0PY_ERROR_2,
new String[] {m_copyresources[i], CmsException.getStackTraceAsString(f)});
report.println(
org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_FAILED_0),
I_CmsReport.FORMAT_ERROR);
// report.println(f);
report.addError(error);
}
}
} |
python | def make_from_catalogue(cls, catalogue, spacing, dilate):
'''
Defines the grid on the basis of the catalogue
'''
new = cls()
cat_bbox = get_catalogue_bounding_polygon(catalogue)
if dilate > 0:
cat_bbox = cat_bbox.dilate(dilate)
# Define Grid spacing
new.update({'xmin': np.min(cat_bbox.lons),
'xmax': np.max(cat_bbox.lons),
'xspc': spacing,
'ymin': np.min(cat_bbox.lats),
'ymax': np.max(cat_bbox.lats),
'yspc': spacing,
'zmin': 0.,
'zmax': np.max(catalogue.data['depth']),
'zspc': np.max(catalogue.data['depth'])})
if new['zmin'] == new['zmax'] == new['zspc'] == 0:
new['zmax'] = new['zspc'] = 1
return new |
python | def minimize(self,
loss,
global_step=None,
var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
"""Adapted from TensorFlow Optimizer base class member function.
Add operations to minimize `loss` by updating `var_list`.
This method simply combines calls to `compute_gradients()` and
`apply_gradients()`. If you want to process the gradients before applying
them, call `tf.gradients()` and `self.apply_gradients()` explicitly instead
of using this function.
Args:
loss: A Tensor containing the value to minimize.
global_step: Optional Variable to increment by one after the variables
have been updated.
var_list: Optional list or tuple of Variable objects to update to
minimize loss. Defaults to the list of variables collected in
the graph under the key GraphKeys.TRAINABLE_VARIABLES.
gate_gradients: How to gate the computation of gradients.
Can be GATE_NONE, GATE_OP, or GATE_GRAPH.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class AggregationMethod.
colocate_gradients_with_ops: If True, try collocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A Tensor holding the gradient computed for loss.
Returns:
An Operation that updates the variables in var_list.
If global_step was not None, that operation also increments global_step.
Raises:
ValueError: if no gradients are provided for any variable.
"""
grads_and_vars = self._momentum_optimizer.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
for g, v in grads_and_vars:
print("g ", g)
print("v ", v)
return self.apply_gradients(grads_and_vars,
global_step=global_step,
name=name) |
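
For context, a minimal sketch of the two-step pattern the docstring describes (compute gradients, then apply them), using a stock `MomentumOptimizer` rather than the wrapper class above; TF1-style graph execution is assumed:

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.Variable(3.0)
loss = tf.square(x - 2.0)
opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9)
grads_and_vars = opt.compute_gradients(loss)    # step 1: gradients per variable
train_op = opt.apply_gradients(grads_and_vars)  # step 2: variable updates
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(20):
        sess.run(train_op)
    print(sess.run(x))  # approaches 2.0
```
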
python | def sill(self):
""" get the sill of the GeoStruct
Returns
-------
sill : float
the sill of the (nested) GeoStruct, including nugget and contribution
from each variogram
"""
sill = self.nugget
for v in self.variograms:
sill += v.contribution
return sill |
java | public static Class<?>[] toClass(final Object... array) {
if (array == null) {
return null;
} else if (array.length == 0) {
return ArrayUtils.EMPTY_CLASS_ARRAY;
}
final Class<?>[] classes = new Class<?>[array.length];
for (int i = 0; i < array.length; i++) {
classes[i] = array[i] == null ? null : array[i].getClass();
}
return classes;
} |
python | def discard_incoming_messages(self):
"""
Discard all incoming messages for the duration of the context manager.
"""
# Flush any received message so far.
self.inbox.clear()
# This allows nesting of discard_incoming_messages() calls.
previous = self._discard_incoming_messages
self._discard_incoming_messages = True
try:
yield
finally:
self._discard_incoming_messages = previous |
java | public PoolGetAllLifetimeStatisticsOptions withOcpDate(DateTime ocpDate) {
if (ocpDate == null) {
this.ocpDate = null;
} else {
this.ocpDate = new DateTimeRfc1123(ocpDate);
}
return this;
} |
python | def insert_into_obj(self, data):
'''Insert text into selected object.
Args:
data: The data you want to insert.
Returns:
None
Raises:
None
'''
if not data:
data = ''
size = len(data)
n1 = size % 256   # low byte of the payload length
n2 = size // 256  # high byte (integer division so chr() receives an int on Python 3)
self.send('^DI'+chr(n1)+chr(n2)+data) |
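
A tiny worked example of the two-byte length prefix built above (assuming the device expects size = n1 + 256 * n2):

```python
size = 300
n1, n2 = size % 256, size // 256  # -> 44, 1
assert n1 + 256 * n2 == size
```
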
python | def backgroundCMD(self, catalog, mode='cloud-in-cells', weights=None):
"""
Generate an empirical background model in color-magnitude space.
INPUTS:
catalog: Catalog object
OUTPUTS:
background
"""
# Select objects in annulus
cut_annulus = self.roi.inAnnulus(catalog.lon,catalog.lat)
color = catalog.color[cut_annulus]
mag = catalog.mag[cut_annulus]
# Units are (deg^2)
solid_angle = ugali.utils.binning.take2D(self.solid_angle_cmd, color, mag,
self.roi.bins_color, self.roi.bins_mag)
# Weight each object before binning
# Divide by solid angle and bin size in magnitudes to get number density
# [objs / deg^2 / mag^2]
if weights is None:
number_density = (solid_angle*self.roi.delta_color*self.roi.delta_mag)**(-1)
else:
number_density = weights*(solid_angle*self.roi.delta_color*self.roi.delta_mag)**(-1)
mode = str(mode).lower()
if mode == 'cloud-in-cells':
# Apply cloud-in-cells algorithm
cmd_background = ugali.utils.binning.cloudInCells(color,mag,
[self.roi.bins_color,self.roi.bins_mag],
weights=number_density)[0]
elif mode == 'bootstrap':
# Not implemented
raise ValueError("Bootstrap mode not implemented")
# Unreachable placeholder for the intended magnitude resampling:
#mag_1_array = catalog.mag_1
#mag_2_array = catalog.mag_2
#catalog.mag_1 + (catalog.mag_1_err * np.random.normal(0, 1., len(catalog.mag_1)))
#catalog.mag_2 + (catalog.mag_2_err * np.random.normal(0, 1., len(catalog.mag_2)))
elif mode == 'histogram':
# Apply raw histogram
cmd_background = np.histogram2d(mag,color,bins=[self.roi.bins_mag,self.roi.bins_color],
weights=number_density)[0]
elif mode == 'kde':
# Gridded kernel density estimator
logger.warning("### KDE not implemented properly")
cmd_background = ugali.utils.binning.kernelDensity(color,mag,
[self.roi.bins_color,self.roi.bins_mag],
weights=number_density)[0]
elif mode == 'uniform':
logger.warning("### WARNING: Uniform CMD")
hist = np.histogram2d(mag,color,bins=[self.roi.bins_mag,self.roi.bins_color], weights=number_density)[0]
cmd_background = np.mean(hist)*np.ones(hist.shape)
observable = (self.solid_angle_cmd > self.minimum_solid_angle)
cmd_background *= observable
return cmd_background
else:
raise ValueError("Unrecognized mode: %s"%mode)
## Account for the objects that spill out of the observable space
## But what about the objects that spill out to red colors??
#for index_color in range(0, len(self.roi.centers_color)):
# for index_mag in range(0, len(self.roi.centers_mag)):
# if self.solid_angle_cmd[index_mag][index_color] < self.minimum_solid_angle:
# cmd_background[index_mag - 1][index_color] += cmd_background[index_mag][index_color]
# cmd_background[index_mag][index_color] = 0.
# break
cmd_area = self.solid_angle_cmd*self.roi.delta_color*self.roi.delta_mag # [deg^2 * mag^2]
# ADW: This accounts for leakage to faint magnitudes
# But what about the objects that spill out to red colors??
# Maximum observable magnitude index for each color (uses the fact that
# np.argmin returns the first minimum (zero) instance found).
# NOTE: More complicated maps may have holes causing problems
observable = (self.solid_angle_cmd > self.minimum_solid_angle)
index_mag = observable.argmin(axis=0) - 1
index_color = np.arange(len(self.roi.centers_color))
# Add the cumulative leakage back into the last bin of the CMD
leakage = (cmd_background * ~observable).sum(axis=0)
cmd_background[index_mag, index_color] += leakage  # index as a (row, col) pair, not a nested list
# Zero out all non-observable bins
cmd_background *= observable
# Avoid dividing by zero by setting empty bins to the value of the
# minimum filled bin of the CMD. This choice is arbitrary and
# could be replaced by a static minimum, some fraction of the
# CMD maximum, some median clipped minimum, etc. However, should
# be robust against outliers with very small values.
min_cmd_background = max(cmd_background[cmd_background > 0.].min(),
1e-4*cmd_background.max())
cmd_background[observable] = cmd_background[observable].clip(min_cmd_background)
### # ADW: This is a fudge factor introduced to renormalize the CMD
### # to the number of input stars in the annulus. While leakage
### # will still smooth the distribution, it shouldn't result in
### fudge_factor = len(mag) / float((cmd_background*cmd_area).sum())
### cmd_background *= fudge_factor
return cmd_background |
java | @Override
public R visitDocComment(DocCommentTree node, P p) {
return defaultAction(node, p);
} |
python | def _glyph_for_monomer_pattern(self, pattern):
"""Add glyph for a PySB MonomerPattern."""
pattern.matches_key = lambda: str(pattern)
agent_id = self._make_agent_id(pattern)
# Handle sources and sinks
if pattern.monomer.name in ('__source', '__sink'):
return None
# Handle molecules
glyph = emaker.glyph(emaker.label(text=pattern.monomer.name),
emaker.bbox(**self.monomer_style),
class_('macromolecule'), id=agent_id)
# Temporarily remove this
# Add a glyph for type
#type_glyph = emaker.glyph(emaker.label(text='mt:prot'),
# class_('unit of information'),
# emaker.bbox(**self.entity_type_style),
# id=self._make_id())
#glyph.append(type_glyph)
for site, value in pattern.site_conditions.items():
if value is None or isinstance(value, int):
continue
# Make some common abbreviations
if site == 'phospho':
site = 'p'
elif site == 'activity':
site = 'act'
if value == 'active':
value = 'a'
elif value == 'inactive':
value = 'i'
state = emaker.state(variable=site, value=value)
state_glyph = \
emaker.glyph(state, emaker.bbox(**self.entity_state_style),
class_('state variable'), id=self._make_id())
glyph.append(state_glyph)
return glyph |
python | def after_initial_login(self, response):
"""
This method is called *only* if the crawler is started with an
email and password combination.
It verifies that the login request was successful,
and then generates requests from `self.start_urls`.
"""
if LOGIN_FAILURE_MSG in response.text:
self.logger.error(
"Credentials failed. Either add/update the current credentials "
"or remove them to enable auto auth"
)
return
self.logger.info("successfully completed initial login")
if self.single_url:
yield scrapy.Request(
self.single_url,
callback=self.parse_item,
errback=self.handle_error
)
else:
for url in self.start_urls:
yield scrapy.Request(
url,
callback=self.analyze_url_list,
errback=self.handle_error
) |
java | protected FwAssistantDirector getAssistantDirector() {
if (cachedAssistantDirector != null) {
return cachedAssistantDirector;
}
synchronized (this) {
if (cachedAssistantDirector != null) {
return cachedAssistantDirector;
}
cachedAssistantDirector = ContainerUtil.getComponent(FwAssistantDirector.class);
}
return cachedAssistantDirector;
} |
python | def move_to_next_address(self, size_of_current):
"""Moves the register's current address to the next available.
size_of_current specifies how many bytes/words to skip"""
self._size_of_current_register_address = size_of_current
self._current_address = self.next_address()
self.mark_address(self._current_address, size_of_current) |
python | def resolve_resource_id_refs(self, input_dict, supported_resource_id_refs):
"""
Resolve resource references within a GetAtt dict.
Example:
{ "Fn::GetAtt": ["LogicalId", "Arn"] } => {"Fn::GetAtt": ["ResolvedLogicalId", "Arn"]}
Theoretically, only the first element of the array can contain a reference to SAM resources. The second element
is name of an attribute (like Arn) of the resource.
However tools like AWS CLI apply the assumption that first element of the array is a LogicalId and cannot
contain a 'dot'. So they break at the first dot to convert YAML tag to JSON map like this:
`!GetAtt LogicalId.Arn` => {"Fn::GetAtt": [ "LogicalId", "Arn" ] }
Therefore to resolve the reference, we join the array into a string, break it back up to check if it contains
a known reference, and resolve it if we can.
:param input_dict: Dictionary to be resolved
:param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones.
:return: Resolved dictionary
"""
if not self.can_handle(input_dict):
return input_dict
key = self.intrinsic_name
value = input_dict[key]
# Value must be an array with *at least* two elements. If not, this is invalid GetAtt syntax. We just pass along
# the input to CFN for it to do the "official" validation.
if not isinstance(value, list) or len(value) < 2:
return input_dict
value_str = self._resource_ref_separator.join(value)
splits = value_str.split(self._resource_ref_separator)
logical_id = splits[0]
remaining = splits[1:] # if any
resolved_value = supported_resource_id_refs.get(logical_id)
return self._get_resolved_dictionary(input_dict, key, resolved_value, remaining) |
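
A minimal sketch of the join/split/lookup step described in the docstring; the logical IDs and the resolved map below are hypothetical:

```python
sep = "."
value = ["MyFunction", "Arn"]
supported_resource_id_refs = {"MyFunction": "MyFunctionResolved"}

logical_id, *remaining = sep.join(value).split(sep)
resolved = supported_resource_id_refs.get(logical_id)
print({"Fn::GetAtt": [resolved] + remaining})
# {'Fn::GetAtt': ['MyFunctionResolved', 'Arn']}
```
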
python | def _start_of_decade(self):
"""
Reset the date to the first day of the decade.
:rtype: Date
"""
year = self.year - self.year % YEARS_PER_DECADE
return self.set(year, 1, 1) |
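
A quick worked example of the decade floor computed above (YEARS_PER_DECADE is assumed to be 10):

```python
YEARS_PER_DECADE = 10
year = 2017
print(year - year % YEARS_PER_DECADE)  # 2010
```
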
java | public JobDisableOptions withIfModifiedSince(DateTime ifModifiedSince) {
if (ifModifiedSince == null) {
this.ifModifiedSince = null;
} else {
this.ifModifiedSince = new DateTimeRfc1123(ifModifiedSince);
}
return this;
} |
python | def _flush_bits_to_stream(self):
"""Flush the bits to the stream. This is used when
a few bits have been read and ``self._bits`` contains unconsumed/
flushed bits when data is to be written to the stream
"""
if len(self._bits) == 0:
return 0
bits = list(self._bits)
diff = 8 - (len(bits) % 8)
padding = [0] * diff
bits = bits + padding
self._stream.write(bits_to_bytes(bits))
self._bits.clear() |
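
A small sketch of the byte-boundary padding performed above (the bit values are made up):

```python
bits = [1, 0, 1]            # three unflushed bits
diff = 8 - (len(bits) % 8)  # five zero bits of padding
padded = bits + [0] * diff
print(len(padded))          # 8 -> exactly one byte
```
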
python | def _create_variables(self, n_features):
"""Create the TensorFlow variables for the model.
:param n_features: number of features
:return: None
"""
w_name = 'weights'
self.W = tf.Variable(tf.truncated_normal(
shape=[n_features, self.num_hidden], stddev=0.1), name=w_name)
tf.summary.histogram(w_name, self.W)
bh_name = 'hidden-bias'
self.bh_ = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]),
name=bh_name)
tf.summary.histogram(bh_name, self.bh_)
bv_name = 'visible-bias'
self.bv_ = tf.Variable(tf.constant(0.1, shape=[n_features]),
name=bv_name)
tf.summary.histogram(bv_name, self.bv_) |
python | def OSXEnumerateRunningServicesFromClient(args):
"""Get running launchd jobs.
Args:
args: Unused.
Yields:
`rdf_client.OSXServiceInformation` instances.
Raises:
UnsupportedOSVersionError: for OS X earlier than 10.6.
"""
del args # Unused.
osx_version = client_utils_osx.OSXVersion()
version_array = osx_version.VersionAsMajorMinor()
if version_array[:2] < [10, 6]:
raise UnsupportedOSVersionError(
"ServiceManagement API unsupported on < 10.6. This client is %s" %
osx_version.VersionString())
launchd_list = GetRunningLaunchDaemons()
parser = osx_launchd.OSXLaunchdJobDict(launchd_list)
for job in parser.Parse():
response = CreateServiceProto(job)
yield response |
java | private void preserveAttributes(AlluxioURI srcPath, AlluxioURI dstPath)
throws IOException, AlluxioException {
if (mPreservePermissions) {
URIStatus srcStatus = mFileSystem.getStatus(srcPath);
mFileSystem.setAttribute(dstPath, SetAttributePOptions.newBuilder()
.setOwner(srcStatus.getOwner())
.setGroup(srcStatus.getGroup())
.setMode(new Mode((short) srcStatus.getMode()).toProto())
.build());
mFileSystem.setAcl(dstPath, SetAclAction.REPLACE, srcStatus.getAcl().getEntries());
}
} |
java | public ApplicationException wrap(Throwable cause) {
if (cause instanceof ApplicationException)
return (ApplicationException) cause;
this.withCause(cause);
return this;
} |
java | private void setUpAnimations() {
slideUp = AnimationUtils.loadAnimation(this,
R.anim.slide_up);
slideDown = AnimationUtils.loadAnimation(this,
R.anim.slide_down);
} |
java | public static PrimitiveIterator.OfDouble iterator(Spliterator.OfDouble spliterator) {
Objects.requireNonNull(spliterator);
class Adapter implements PrimitiveIterator.OfDouble, DoubleConsumer {
boolean valueReady = false;
double nextElement;
@Override
public void accept(double t) {
valueReady = true;
nextElement = t;
}
@Override
public boolean hasNext() {
if (!valueReady)
spliterator.tryAdvance(this);
return valueReady;
}
@Override
public double nextDouble() {
if (!valueReady && !hasNext())
throw new NoSuchElementException();
else {
valueReady = false;
return nextElement;
}
}
}
return new Adapter();
} |
python | def filter(self):
"""Generate a filtered query from request parameters.
:returns: Filtered SQLALchemy query
"""
argmap = {
filter.label or label: filter.field
for label, filter in self.filters.items()
}
args = self.opts.parser.parse(argmap)
query = self.query if self.query is not None else self.opts.query
for label, filter in self.filters.items():
value = args.get(filter.label or label)
if value is not None:
query = filter.filter(query, self.opts.model, label, value)
return query |
java | ArgumentMarshaller getVersionedArgumentMarshaller(final Method getter, Object getterReturnResult) {
synchronized (versionArgumentMarshallerCache) {
if ( !versionArgumentMarshallerCache.containsKey(getter) ) {
ArgumentMarshaller marshaller = null;
final Class<?> returnType = getter.getReturnType();
if ( BigInteger.class.isAssignableFrom(returnType) ) {
marshaller = new ArgumentMarshaller() {
@Override
public AttributeValue marshall(Object obj) {
if ( obj == null )
obj = BigInteger.ZERO;
Object newValue = ((BigInteger) obj).add(BigInteger.ONE);
return getArgumentMarshaller(getter).marshall(newValue);
}
};
} else if ( Integer.class.isAssignableFrom(returnType) ) {
marshaller = new ArgumentMarshaller() {
@Override
public AttributeValue marshall(Object obj) {
if ( obj == null )
obj = new Integer(0);
Object newValue = ((Integer) obj).intValue() + 1;
return getArgumentMarshaller(getter).marshall(newValue);
}
};
} else if ( Byte.class.isAssignableFrom(returnType) ) {
marshaller = new ArgumentMarshaller() {
@Override
public AttributeValue marshall(Object obj) {
if ( obj == null )
obj = new Byte((byte) 0);
Object newValue = (byte) ((((Byte) obj).byteValue() + 1) % Byte.MAX_VALUE);
return getArgumentMarshaller(getter).marshall(newValue);
}
};
} else if ( Long.class.isAssignableFrom(returnType) ) {
marshaller = new ArgumentMarshaller() {
@Override
public AttributeValue marshall(Object obj) {
if ( obj == null )
obj = new Long(0);
Object newValue = ((Long) obj).longValue() + 1L;
return getArgumentMarshaller(getter).marshall(newValue);
}
};
} else {
throw new DynamoDBMappingException("Unsupported parameter type for "
+ DynamoDBVersionAttribute.class + ": " + returnType + ". Must be a whole-number type.");
}
versionArgumentMarshallerCache.put(getter, marshaller);
}
}
return versionArgumentMarshallerCache.get(getter);
} |
java | public Analyzer getPropertyAnalyzer(String fieldName)
{
if (analyzers.containsKey(fieldName))
{
return analyzers.get(fieldName);
}
return null;
} |
java | public String getLabel(HasOther e) {
if (!(e instanceof HasLabel)) {
// Check the runtime type before casting to HasLabel.
return "pFact: label TODO";
}
List<LangString> labels = ((HasLabel) e).getLabel();
if ((labels == null) || (labels.isEmpty()))
return null;
return labels.get(0).getValue();
} |
java | @Override
public void inAppNotificationDidShow(Context context, CTInAppNotification inAppNotification, Bundle formData) {
pushInAppNotificationStateEvent(false, inAppNotification, formData);
} |
java | public static void invokeProxied(final ProxiedAction action, final ClassLoader classLoader) throws Exception {
ProxiedAction proxy = (ProxiedAction) Proxy.newProxyInstance(classLoader, new Class<?>[] { ProxiedAction.class }, new InvocationHandler() {
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
action.run();
return Optional.empty();
}
});
proxy.run();
} |
java | public static void jacobian_Control3( DMatrixRMaj L_full ,
double beta[] , DMatrixRMaj A)
{
int indexA = 0;
double b0 = beta[0]; double b1 = beta[1]; double b2 = beta[2];
final double ld[] = L_full.data;
for( int i = 0; i < 3; i++ ) {
int li = L_full.numCols*i;
A.data[indexA++] = 2*ld[li+0]*b0 + ld[li+1]*b1 + ld[li+2]*b2;
A.data[indexA++] = ld[li+1]*b0 + 2*ld[li+3]*b1 + ld[li+4]*b2;
A.data[indexA++] = ld[li+2]*b0 + ld[li+4]*b1 + 2*ld[li+5]*b2;
}
} |
java | private void publish(Cache<K, V> cache, EventType eventType,
K key, @Nullable V oldValue, @Nullable V newValue, boolean quiet) {
if (dispatchQueues.isEmpty()) {
return;
}
JCacheEntryEvent<K, V> event = null;
for (Registration<K, V> registration : dispatchQueues.keySet()) {
if (!registration.getCacheEntryListener().isCompatible(eventType)) {
continue;
}
if (event == null) {
event = new JCacheEntryEvent<>(cache, eventType, key, oldValue, newValue);
}
if (!registration.getCacheEntryFilter().evaluate(event)) {
continue;
}
JCacheEntryEvent<K, V> e = event;
CompletableFuture<Void> future =
dispatchQueues.computeIfPresent(registration, (k, queue) -> {
Runnable action = () -> registration.getCacheEntryListener().dispatch(e);
return queue.thenRunAsync(action, executor);
});
if ((future != null) && registration.isSynchronous() && !quiet) {
pending.get().add(future);
}
}
} |
java | public Map<String, Object> toMap(List<QueryParameters> paramsList) {
Map<String, Object> result = null;
Iterator<QueryParameters> iterator = paramsList.iterator();
// skipping header
if (iterator.hasNext()) {
iterator.next();
}
if (iterator.hasNext()) {
result = this.toMap(iterator.next());
} else {
result = new HashMap<String, Object>();
}
return result;
} |
python | def seen_tasks(self):
"""Shows a list of seen task types."""
print('\n'.join(self._stub.seen_tasks(clearly_pb2.Empty()).task_types)) |
java | public static vpnformssoaction[] get(nitro_service service) throws Exception{
vpnformssoaction obj = new vpnformssoaction();
vpnformssoaction[] response = (vpnformssoaction[])obj.get_resources(service);
return response;
} |
python | def post(self, service, data):
"""Generic POST operation for sending data to Learning Modules API.
Data should be a JSON string or a dict. If it is not a string,
it is turned into a JSON string for the POST body.
Args:
service (str): The endpoint service to use, i.e. gradebook
data (json or dict): the data payload
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
list: the json-encoded content of the response
"""
url = self._url_format(service)
data = Base._data_to_json(data)
# Add content-type for body in POST.
headers = {'content-type': 'application/json'}
return self.rest_action(self._session.post, url,
data=data, headers=headers) |
python | def modify(self, dn: str, mod_list: dict) -> None:
"""
Modify a DN in the LDAP database; See ldap module. Doesn't return a
result if transactions enabled.
"""
_debug("modify", self, dn, mod_list)
# need to work out how to reverse changes in mod_list; result in revlist
revlist = {}
# get the current cached attributes
result = self._cache_get_for_dn(dn)
# find the how to reverse mod_list (for rollback) and put result in
# revlist. Also simulate actions on cache.
for mod_type, l in six.iteritems(mod_list):
for mod_op, mod_vals in l:
_debug("attribute:", mod_type)
if mod_type in result:
_debug("attribute cache:", result[mod_type])
else:
_debug("attribute cache is empty")
_debug("attribute modify:", (mod_op, mod_vals))
if mod_vals is not None:
if not isinstance(mod_vals, list):
mod_vals = [mod_vals]
if mod_op == ldap3.MODIFY_ADD:
# reverse of MODIFY_ADD is MODIFY_DELETE
reverse = (ldap3.MODIFY_DELETE, mod_vals)
elif mod_op == ldap3.MODIFY_DELETE and len(mod_vals) > 0:
# Reverse of MODIFY_DELETE is MODIFY_ADD, but only if a value
# is given; if mod_vals is None, this means all values were
# deleted.
reverse = (ldap3.MODIFY_ADD, mod_vals)
elif mod_op == ldap3.MODIFY_DELETE \
or mod_op == ldap3.MODIFY_REPLACE:
if mod_type in result:
# If MODIFY_DELETE with no values or MODIFY_REPLACE
# then we have to replace all attributes with cached
# state
reverse = (
ldap3.MODIFY_REPLACE,
tldap.modlist.escape_list(result[mod_type])
)
else:
# except if we have no cached state for this DN, in
# which case we delete it.
reverse = (ldap3.MODIFY_DELETE, [])
else:
raise RuntimeError("mod_op of %s not supported" % mod_op)
reverse = [reverse]
_debug("attribute reverse:", reverse)
if mod_type in result:
_debug("attribute cache:", result[mod_type])
else:
_debug("attribute cache is empty")
revlist[mod_type] = reverse
_debug("--")
_debug("mod_list:", mod_list)
_debug("revlist:", revlist)
_debug("--")
# now the hard stuff is over, we get to the easy stuff
def on_commit(obj):
obj.modify(dn, mod_list)
def on_rollback(obj):
obj.modify(dn, revlist)
return self._process(on_commit, on_rollback) |
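
An illustrative sketch of the rollback bookkeeping described in the comments above: the reverse of a MODIFY_ADD is a MODIFY_DELETE with the same values. The attribute name and value are made up, and ldap3 must be installed:

```python
import ldap3

mod_list = {"mail": [(ldap3.MODIFY_ADD, ["user@example.com"])]}
revlist = {"mail": [(ldap3.MODIFY_DELETE, ["user@example.com"])]}
print(revlist)
```
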
python | def convert_shaders(convert, shaders):
""" Modify shading code so that we can write code once
and make it run "everywhere".
"""
# New version of the shaders
out = []
if convert == 'es2':
for isfragment, shader in enumerate(shaders):
has_version = False
has_prec_float = False
has_prec_int = False
lines = []
# Iterate over lines
for line in shader.lstrip().splitlines():
if line.startswith('#version'):
has_version = True
continue
if line.startswith('precision '):
has_prec_float = has_prec_float or 'float' in line
has_prec_int = has_prec_int or 'int' in line
lines.append(line.rstrip())
# Write
# BUG: fails on WebGL (Chrome)
# if True:
# lines.insert(has_version, '#line 0')
if not has_prec_float:
lines.insert(has_version, 'precision highp float;')
if not has_prec_int:
lines.insert(has_version, 'precision highp int;')
# BUG: fails on WebGL (Chrome)
# if not has_version:
# lines.insert(has_version, '#version 100')
out.append('\n'.join(lines))
elif convert == 'desktop':
for isfragment, shader in enumerate(shaders):
has_version = False
lines = []
# Iterate over lines
for line in shader.lstrip().splitlines():
has_version = has_version or line.startswith('#version')
if line.startswith('precision '):
line = ''
for prec in (' highp ', ' mediump ', ' lowp '):
line = line.replace(prec, ' ')
lines.append(line.rstrip())
# Write
if not has_version:
lines.insert(0, '#version 120\n')
out.append('\n'.join(lines))
else:
raise ValueError('Cannot convert shaders to %r.' % convert)
return tuple(out) |
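
A hedged usage sketch of `convert_shaders`; the GLSL strings are made up, and the function above is assumed to be in scope:

```python
vert = "attribute vec2 a_pos;\nvoid main() { gl_Position = vec4(a_pos, 0.0, 1.0); }"
frag = "void main() { gl_FragColor = vec4(1.0); }"
vert_es2, frag_es2 = convert_shaders('es2', [vert, frag])
print(frag_es2.splitlines()[:2])  # precision qualifiers are prepended
```
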
java | @BetaApi(
"The surface for long-running operations is not stable yet and may change in the future.")
public final OperationFuture<Instance, Any> failoverInstanceAsync(
FailoverInstanceRequest request) {
return failoverInstanceOperationCallable().futureCall(request);
} |
java | private void calculateMinimumScaleToFit() {
float minimumScaleX = getWidth() / (float) getContentWidth();
float minimumScaleY = getHeight() / (float) getContentHeight();
float recalculatedMinScale = computeMinimumScaleForMode(minimumScaleX, minimumScaleY);
if (recalculatedMinScale != mEffectiveMinScale) {
mEffectiveMinScale = recalculatedMinScale;
if (mScale < mEffectiveMinScale) {
setScale(mEffectiveMinScale);
}
}
} |
java | public void run() {
Protos.FrameworkInfo.Builder frameworkInfo = Protos.FrameworkInfo.newBuilder()
.setName("alluxio").setCheckpoint(true);
if (ServerConfiguration.isSet(PropertyKey.INTEGRATION_MESOS_ROLE)) {
frameworkInfo.setRole(ServerConfiguration.get(PropertyKey.INTEGRATION_MESOS_ROLE));
}
if (ServerConfiguration.isSet(PropertyKey.INTEGRATION_MESOS_USER)) {
frameworkInfo.setUser(ServerConfiguration.get(PropertyKey.INTEGRATION_MESOS_USER));
} else {
// Setting the user to an empty string will prompt Mesos to set it to the current user.
frameworkInfo.setUser("");
}
if (ServerConfiguration.isSet(PropertyKey.INTEGRATION_MESOS_PRINCIPAL)) {
frameworkInfo.setPrincipal(ServerConfiguration.get(PropertyKey.INTEGRATION_MESOS_PRINCIPAL));
}
// Publish WebUI url to mesos master.
String masterWebUrl = createMasterWebUrl();
frameworkInfo.setWebuiUrl(masterWebUrl);
Scheduler scheduler = new AlluxioScheduler(mAlluxioMasterHostname);
Protos.Credential cred = createCredential();
MesosSchedulerDriver driver;
if (cred == null) {
driver = new MesosSchedulerDriver(scheduler, frameworkInfo.build(), mMesosMaster);
} else {
driver = new MesosSchedulerDriver(scheduler, frameworkInfo.build(), mMesosMaster, cred);
}
int status = driver.run() == Protos.Status.DRIVER_STOPPED ? 0 : 1;
System.exit(status);
} |
java | @Override
public void rollback(Savepoint savepoint) throws SQLException
{
delegate.rollback(savepoint);
isCommitStateDirty = false;
lastAccess = currentTime();
} |
java | private void completeCall(ClientResponseImpl response) {
// if we're keeping track, calculate result size
if (m_perCallStats.samplingProcedure()) {
m_perCallStats.setResultSize(response.getResults());
}
m_statsCollector.endProcedure(response.getStatus() == ClientResponse.USER_ABORT,
(response.getStatus() != ClientResponse.USER_ABORT) &&
(response.getStatus() != ClientResponse.SUCCESS),
m_perCallStats);
// allow the GC to collect per-call stats if this proc isn't called for a while
m_perCallStats = null;
// send the response to the caller
// must be done as IRM to CI mailbox for backpressure accounting
response.setClientHandle(m_clientHandle);
InitiateResponseMessage irm = InitiateResponseMessage.messageForNTProcResponse(m_ciHandle,
m_ccxn.connectionId(),
response);
m_mailbox.deliver(irm);
m_ntProcService.handleNTProcEnd(ProcedureRunnerNT.this);
} |
python | def get_loaded_rules(rules_paths):
"""Yields all available rules.
:type rules_paths: [Path]
:rtype: Iterable[Rule]
"""
for path in rules_paths:
if path.name != '__init__.py':
rule = Rule.from_path(path)
if rule.is_enabled:
yield rule |
java | public int[] transferValues(int state, int codePoint)
{
if (state < 1)
{
return EMPTY_WALK_STATE;
}
if ((state != 1) && (isEmpty(state)))
{
return EMPTY_WALK_STATE;
}
int[] ids = this.charMap.toIdList(codePoint);
if (ids.length == 0)
{
return EMPTY_WALK_STATE;
}
for (int i = 0; i < ids.length; i++)
{
int c = ids[i];
if ((getBase(state) + c < getBaseArraySize())
&& (getCheck(getBase(state) + c) == state))
{
state = getBase(state) + c;
}
else
{
return EMPTY_WALK_STATE;
}
}
if (getCheck(getBase(state) + UNUSED_CHAR_VALUE) == state)
{
int value = getLeafValue(getBase(getBase(state)
+ UNUSED_CHAR_VALUE));
return new int[]{state, value};
}
return new int[]{state, -1};
} |
python | def detached(name,
rev,
target=None,
remote='origin',
user=None,
password=None,
force_clone=False,
force_checkout=False,
fetch_remote=True,
hard_reset=False,
submodules=False,
identity=None,
https_user=None,
https_pass=None,
onlyif=None,
unless=None,
output_encoding=None,
**kwargs):
'''
.. versionadded:: 2016.3.0
Make sure a repository is cloned to the given target directory and is
a detached HEAD checkout of the commit ID resolved from ``rev``.
name
Address of the remote repository.
rev
The branch, tag, or commit ID to checkout after clone.
If a branch or tag is specified it will be resolved to a commit ID
and checked out.
target
Name of the target directory where repository is about to be cloned.
remote : origin
Git remote to use. If this state needs to clone the repo, it will clone
it using this value as the initial remote name. If the repository
already exists, and a remote by this name is not present, one will be
added.
user
User under which to run git commands. By default, commands are run by
the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
force_clone : False
If the ``target`` directory exists and is not a git repository, then
this state will fail. Set this argument to ``True`` to remove the
contents of the target directory and clone the repo into it.
force_checkout : False
When checking out the revision ID, the state will fail if there are
unwritten changes. Set this argument to ``True`` to discard unwritten
changes when checking out.
fetch_remote : True
If ``False`` a fetch will not be performed and only local refs
will be reachable.
hard_reset : False
If ``True`` a hard reset will be performed before the checkout and any
uncommitted modifications to the working directory will be discarded.
Untracked files will remain in place.
.. note::
Changes resulting from a hard reset will not trigger requisites.
submodules : False
Update submodules
identity
A path on the minion (or a SaltStack fileserver URL, e.g.
``salt://path/to/identity_file``) to a private key to use for SSH
authentication.
https_user
HTTP Basic Auth username for HTTPS (only) clones
https_pass
HTTP Basic Auth password for HTTPS (only) clones
onlyif
A command to run as a check, run the named command only if the command
passed to the ``onlyif`` option returns true
unless
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
return _fail(
ret,
salt.utils.args.invalid_kwargs(kwargs, raise_exc=False)
)
if not rev:
return _fail(
ret,
'\'{0}\' is not a valid value for the \'rev\' argument'.format(rev)
)
if not target:
return _fail(
ret,
'\'{0}\' is not a valid value for the \'target\' argument'.format(rev)
)
# Ensure that certain arguments are strings to ensure that comparisons work
if not isinstance(rev, six.string_types):
rev = six.text_type(rev)
if target is not None:
if not isinstance(target, six.string_types):
target = six.text_type(target)
if not os.path.isabs(target):
return _fail(
ret,
'Target \'{0}\' is not an absolute path'.format(target)
)
if user is not None and not isinstance(user, six.string_types):
user = six.text_type(user)
if remote is not None and not isinstance(remote, six.string_types):
remote = six.text_type(remote)
if identity is not None:
if isinstance(identity, six.string_types):
identity = [identity]
elif not isinstance(identity, list):
return _fail(ret, 'Identity must be either a list or a string')
identity = [os.path.expanduser(x) for x in identity]
for ident_path in identity:
if 'salt://' in ident_path:
try:
ident_path = __salt__['cp.cache_file'](ident_path)
except IOError as exc:
log.error('Failed to cache %s: %s', ident_path, exc)
return _fail(
ret,
'Identity \'{0}\' does not exist.'.format(
ident_path
)
)
if not os.path.isabs(ident_path):
return _fail(
ret,
'Identity \'{0}\' is not an absolute path'.format(
ident_path
)
)
if https_user is not None and not isinstance(https_user, six.string_types):
https_user = six.text_type(https_user)
if https_pass is not None and not isinstance(https_pass, six.string_types):
https_pass = six.text_type(https_pass)
if os.path.isfile(target):
return _fail(
ret,
'Target \'{0}\' exists and is a regular file, cannot proceed'
.format(target)
)
try:
desired_fetch_url = salt.utils.url.add_http_basic_auth(
name,
https_user,
https_pass,
https_only=True
)
except ValueError as exc:
return _fail(ret, exc.__str__())
redacted_fetch_url = salt.utils.url.redact_http_basic_auth(desired_fetch_url)
# Check if onlyif or unless conditions match
run_check_cmd_kwargs = {'runas': user}
if 'shell' in __grains__:
run_check_cmd_kwargs['shell'] = __grains__['shell']
cret = mod_run_check(
run_check_cmd_kwargs, onlyif, unless
)
if isinstance(cret, dict):
ret.update(cret)
return ret
# Determine if supplied ref is a hash
remote_rev_type = 'ref'
if len(rev) <= 40 \
and all(x in string.hexdigits for x in rev):
rev = rev.lower()
remote_rev_type = 'hash'
comments = []
hash_exists_locally = False
local_commit_id = None
gitdir = os.path.join(target, '.git')
if os.path.isdir(gitdir) \
or __salt__['git.is_worktree'](target,
user=user,
password=password,
output_encoding=output_encoding):
# Target directory is a git repository or git worktree
local_commit_id = _get_local_rev_and_branch(
target,
user,
password,
output_encoding=output_encoding)[0]
if remote_rev_type == 'hash':
try:
__salt__['git.describe'](target,
rev,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding)
except CommandExecutionError:
hash_exists_locally = False
else:
# The rev is a hash and it exists locally so skip to checkout
hash_exists_locally = True
else:
# Check that remote is present and set to correct url
remotes = __salt__['git.remotes'](target,
user=user,
password=password,
redact_auth=False,
output_encoding=output_encoding)
if remote in remotes and name in remotes[remote]['fetch']:
pass
else:
# The fetch_url for the desired remote does not match the
# specified URL (or the remote does not exist), so set the
# remote URL.
current_fetch_url = None
if remote in remotes:
current_fetch_url = remotes[remote]['fetch']
if __opts__['test']:
return _neutral_test(
ret,
'Remote {0} would be set to {1}'.format(
remote, name
)
)
__salt__['git.remote_set'](target,
url=name,
remote=remote,
user=user,
password=password,
https_user=https_user,
https_pass=https_pass,
output_encoding=output_encoding)
comments.append(
'Remote {0} updated from \'{1}\' to \'{2}\''.format(
remote,
current_fetch_url,
name
)
)
else:
# Clone repository
if os.path.isdir(target):
target_contents = os.listdir(target)
if force_clone:
# Clone is required, and target directory exists, but the
# ``force`` option is enabled, so we need to clear out its
# contents to proceed.
if __opts__['test']:
return _neutral_test(
ret,
'Target directory {0} exists. Since force_clone=True, '
'the contents of {0} would be deleted, and {1} would '
'be cloned into this directory.'.format(target, name)
)
log.debug(
'Removing contents of %s to clone repository %s in its '
'place (force_clone=True set in git.detached state)',
target, name
)
removal_errors = {}
for target_object in target_contents:
target_path = os.path.join(target, target_object)
try:
salt.utils.files.rm_rf(target_path)
except OSError as exc:
if exc.errno != errno.ENOENT:
removal_errors[target_path] = exc
if removal_errors:
err_strings = [
' {0}\n {1}'.format(k, v)
for k, v in six.iteritems(removal_errors)
]
return _fail(
ret,
'Unable to remove\n{0}'.format('\n'.join(err_strings)),
comments
)
ret['changes']['forced clone'] = True
elif target_contents:
# Clone is required, but target dir exists and is non-empty. We
# can't proceed.
return _fail(
ret,
'Target \'{0}\' exists, is non-empty and is not a git '
'repository. Set the \'force_clone\' option to True to '
'remove this directory\'s contents and proceed with '
'cloning the remote repository'.format(target)
)
log.debug('Target %s is not found, \'git clone\' is required', target)
if __opts__['test']:
return _neutral_test(
ret,
'Repository {0} would be cloned to {1}'.format(
name, target
)
)
try:
clone_opts = ['--no-checkout']
if remote != 'origin':
clone_opts.extend(['--origin', remote])
__salt__['git.clone'](target,
name,
user=user,
password=password,
opts=clone_opts,
identity=identity,
https_user=https_user,
https_pass=https_pass,
saltenv=__env__,
output_encoding=output_encoding)
comments.append('{0} cloned to {1}'.format(name, target))
except Exception as exc:
log.error(
'Unexpected exception in git.detached state',
exc_info=True
)
if isinstance(exc, CommandExecutionError):
msg = _strip_exc(exc)
else:
msg = six.text_type(exc)
return _fail(ret, msg, comments)
# Repository exists and is ready for fetch/checkout
refspecs = [
'refs/heads/*:refs/remotes/{0}/*'.format(remote),
'+refs/tags/*:refs/tags/*'
]
if hash_exists_locally or fetch_remote is False:
pass
else:
# Fetch refs from remote
if __opts__['test']:
return _neutral_test(
ret,
'Repository remote {0} would be fetched'.format(remote)
)
try:
fetch_changes = __salt__['git.fetch'](
target,
remote=remote,
force=True,
refspecs=refspecs,
user=user,
password=password,
identity=identity,
saltenv=__env__,
output_encoding=output_encoding)
except CommandExecutionError as exc:
msg = 'Fetch failed'
msg += ':\n\n' + six.text_type(exc)
return _fail(ret, msg, comments)
else:
if fetch_changes:
comments.append(
'Remote {0} was fetched, resulting in updated '
'refs'.format(remote)
)
# get refs and checkout
checkout_commit_id = ''
if remote_rev_type == 'hash':
if __salt__['git.describe'](
target,
rev,
user=user,
password=password,
output_encoding=output_encoding):
checkout_commit_id = rev
else:
return _fail(
ret,
'Revision \'{0}\' does not exist'.format(rev)
)
else:
try:
all_remote_refs = __salt__['git.remote_refs'](
target,
user=user,
password=password,
identity=identity,
https_user=https_user,
https_pass=https_pass,
ignore_retcode=False,
output_encoding=output_encoding)
if 'refs/remotes/'+remote+'/'+rev in all_remote_refs:
checkout_commit_id = all_remote_refs['refs/remotes/' + remote + '/' + rev]
elif 'refs/tags/' + rev in all_remote_refs:
checkout_commit_id = all_remote_refs['refs/tags/' + rev]
else:
return _fail(
ret,
'Revision \'{0}\' does not exist'.format(rev)
)
except CommandExecutionError as exc:
return _fail(
ret,
'Failed to list refs for {0}: {1}'.format(remote, _strip_exc(exc))
)
if hard_reset:
if __opts__['test']:
return _neutral_test(
ret,
'Hard reset to HEAD would be performed on {0}'.format(target)
)
__salt__['git.reset'](
target,
opts=['--hard', 'HEAD'],
user=user,
password=password,
output_encoding=output_encoding)
comments.append(
'Repository was reset to HEAD before checking out revision'
)
# TODO: implement clean function for git module and add clean flag
if checkout_commit_id == local_commit_id:
new_rev = None
else:
if __opts__['test']:
ret['changes']['HEAD'] = {'old': local_commit_id, 'new': checkout_commit_id}
return _neutral_test(
ret,
'Commit ID {0} would be checked out at {1}'.format(
checkout_commit_id,
target
)
)
__salt__['git.checkout'](target,
checkout_commit_id,
force=force_checkout,
user=user,
password=password,
output_encoding=output_encoding)
comments.append(
'Commit ID {0} was checked out at {1}'.format(
checkout_commit_id,
target
)
)
try:
new_rev = __salt__['git.revision'](
cwd=target,
user=user,
password=password,
ignore_retcode=True,
output_encoding=output_encoding)
except CommandExecutionError:
new_rev = None
if submodules:
__salt__['git.submodule'](target,
'update',
opts=['--init', '--recursive'],
user=user,
password=password,
identity=identity,
output_encoding=output_encoding)
comments.append(
'Submodules were updated'
)
if new_rev is not None:
ret['changes']['HEAD'] = {'old': local_commit_id, 'new': new_rev}
else:
comments.append("Already checked out at correct revision")
msg = _format_comments(comments)
log.info(msg)
ret['comment'] = msg
return ret |
java | public alluxio.grpc.MountPOptions getProperties() {
return properties_ == null ? alluxio.grpc.MountPOptions.getDefaultInstance() : properties_;
} |
python | def _exception_free_callback(self, callback, *args, **kwargs):
""" A wrapper that remove all exceptions raised from hooks """
try:
return callback(*args, **kwargs)
except Exception:
self._logger.exception("An exception occurred while calling a hook!", exc_info=True)
return None |
python | def off(self):
"""Send the Off command to an X10 device."""
msg = X10Send.unit_code_msg(self.address.x10_housecode,
self.address.x10_unitcode)
self._send_method(msg)
msg = X10Send.command_msg(self.address.x10_housecode,
X10_COMMAND_OFF)
self._send_method(msg, False)
self._update_subscribers(0x00) |
java | @Nullable
public static String replaceMacro( @CheckForNull String s, @Nonnull Map<String,String> properties) {
return replaceMacro(s, new VariableResolver.ByMap<>(properties));
} |
python | def close(self):
"""
Closes the handle opened by open().
The remapped function is WinDivertClose::
BOOL WinDivertClose(
__in HANDLE handle
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_close
"""
if not self.is_open:
raise RuntimeError("WinDivert handle is not open.")
windivert_dll.WinDivertClose(self._handle)
self._handle = None |
java | public UserPartAvailableMessage createUPA(int cic) {
UserPartAvailableMessage msg = createUPA();
CircuitIdentificationCode code = this.parameterFactory.createCircuitIdentificationCode();
code.setCIC(cic);
msg.setCircuitIdentificationCode(code);
return msg;
} |
java | public static double rmsd(Point3d[] x, Point3d[] y) {
if (x.length != y.length) {
throw new IllegalArgumentException(
"Point arrays are not of the same length.");
}
double sum = 0.0;
for (int i = 0; i < x.length; i++) {
sum += x[i].distanceSquared(y[i]);
}
return Math.sqrt(sum / x.length);
} |
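
For reference, the quantity computed by `rmsd` above is the root-mean-square deviation over the N paired points:

```latex
\mathrm{RMSD}(x, y) = \sqrt{\frac{1}{N} \sum_{i=1}^{N} \lVert x_i - y_i \rVert^2}
```
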
python | def add_task_status(self, name, **attrs):
"""
Add a Task status to the project and returns a
:class:`TaskStatus` object.
:param name: name of the :class:`TaskStatus`
:param attrs: optional attributes for :class:`TaskStatus`
"""
return TaskStatuses(self.requester).create(self.id, name, **attrs) |
java | protected void submitCycleCompletionEvent() {
if (!this.lowWatermark.equalsIgnoreCase(ComplianceConfigurationKeys.NO_PREVIOUS_WATERMARK)) {
return;
}
if (this.executionCount > 1) {
// Cycle completed
Map<String, String> metadata = new HashMap<>();
metadata.put(ComplianceConfigurationKeys.TOTAL_EXECUTIONS, Integer.toString((this.executionCount - 1)));
this.eventSubmitter.submit(ComplianceEvents.Purger.CYCLE_COMPLETED, metadata);
this.executionCount = ComplianceConfigurationKeys.DEFAULT_EXECUTION_COUNT;
}
} |
java | public String getLanguage(boolean bCheckLocaleAlso)
{
String strLanguage = this.getProperty(Params.LANGUAGE);
if ((strLanguage == null) || (strLanguage.length() == 0))
if (bCheckLocaleAlso)
return Locale.getDefault().getLanguage();
return strLanguage;
} |
python | def get_row_height(self, row, tab):
"""Returns row height"""
try:
return self.row_heights[(row, tab)]
except KeyError:
return config["default_row_height"] |
python | def __get_distribution_tags(self, client, arn):
"""Returns a dict containing the tags for a CloudFront distribution
Args:
client (botocore.client.CloudFront): Boto3 CloudFront client object
arn (str): ARN of the distribution to get tags for
Returns:
`dict`
"""
return {
t['Key']: t['Value'] for t in client.list_tags_for_resource(
Resource=arn
)['Tags']['Items']
} |
python | def connect(self, address):
"""
Connect to a remote or local gpiod daemon.
:param address: a pair (address, port), the address must be already
resolved (for example an ip address)
:return:
"""
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.setblocking(False)
# Disable the Nagle algorithm.
self.s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
yield from self._loop.sock_connect(self.s, address)
yield from self._notify._connect(address) |
python | def _parse_sigmak(line, lines):
"""Parse Energy, Re sigma xx, Im sigma xx, Re sigma zz, Im sigma zz"""
split_line = line.split()
energy = float(split_line[0])
re_sigma_xx = float(split_line[1])
im_sigma_xx = float(split_line[2])
re_sigma_zz = float(split_line[3])
im_sigma_zz = float(split_line[4])
return {"energy": energy, "re_sigma_xx": re_sigma_xx, "im_sigma_xx": im_sigma_xx, "re_sigma_zz": re_sigma_zz,
"im_sigma_zz": im_sigma_zz} |
java | @SuppressWarnings("unchecked")
protected final void updateComponentType(E newElement) {
final Class<? extends E> lclazz = (Class<? extends E>) newElement.getClass();
this.clazz = (Class<? extends E>) ReflectionUtil.getCommonType(this.clazz, lclazz);
} |
python | def unregister(self, observer):
"""
Remove an observer from the observers list.
It will no longer receive notifications when changes occur.
:param UpdatesObserver observer: Observer that will no longer receive
notifications when changes occur.
"""
self.observer_manager.observers.remove(observer)
observer.manager = None |
python | def overview(index, start, end):
"""Compute metrics in the overview section for enriched git indexes.
Returns a dictionary. Each key in the dictionary is the name of
a metric, the value is the value of that metric. Value can be
a complex object (eg, a time series).
:param index: index object
:param start: start date to get the data from
:param end: end date to get the data up to
:return: dictionary with the value of the metrics
"""
results = {
"activity_metrics": [Commits(index, start, end)],
"author_metrics": [Authors(index, start, end)],
"bmi_metrics": [],
"time_to_close_metrics": [],
"projects_metrics": []
}
return results |
python | def GetData(EPIC, season=None, cadence='lc', clobber=False, delete_raw=False,
aperture_name='k2sff_15', saturated_aperture_name='k2sff_19',
max_pixels=75, download_only=False, saturation_tolerance=-0.1,
bad_bits=[1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17],
get_hires=True,
get_nearby=True, **kwargs):
'''
Returns a :py:obj:`DataContainer` instance with the
raw data for the target.
:param int EPIC: The EPIC ID number
:param int season: The observing season (campaign). Default :py:obj:`None`
:param str cadence: The light curve cadence. Default `lc`
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param bool delete_raw: Delete the FITS TPF after processing it? \
Default :py:obj:`False`
:param str aperture_name: The name of the aperture to use. Select \
`custom` to call :py:func:`GetCustomAperture`. Default `k2sff_15`
:param str saturated_aperture_name: The name of the aperture to use if \
the target is saturated. Default `k2sff_19`
:param int max_pixels: Maximum number of pixels in the TPF. Default 75
:param bool download_only: Download raw TPF and return? Default \
:py:obj:`False`
:param float saturation_tolerance: Target is considered saturated \
if flux is within this fraction of the pixel well depth. \
Default -0.1
:param array_like bad_bits: Flagged :py:obj`QUALITY` bits to consider \
outliers when computing the model. \
Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]`
:param bool get_hires: Download a high resolution image of the target? \
Default :py:obj:`True`
:param bool get_nearby: Retrieve location of nearby sources? \
Default :py:obj:`True`
'''
# Campaign no.
if season is None:
campaign = Season(EPIC)
if hasattr(campaign, '__len__'):
raise AttributeError(
"Please choose a campaign/season for this target: %s."
% campaign)
else:
campaign = season
# Is there short cadence data available for this target?
short_cadence = HasShortCadence(EPIC, season=campaign)
if cadence == 'sc' and not short_cadence:
raise ValueError("Short cadence data not available for this target.")
# Local file name
filename = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
('%09d' % EPIC)[:4] + '00000', ('%09d' % EPIC)[4:],
'data.npz')
# Download?
if clobber or not os.path.exists(filename):
# Get the TPF
tpf = os.path.join(KPLR_ROOT, 'data', 'k2', 'target_pixel_files',
str(EPIC), 'ktwo%09d-c%02d_lpd-targ.fits.gz'
% (EPIC, campaign))
sc_tpf = os.path.join(KPLR_ROOT, 'data', 'k2', 'target_pixel_files',
str(EPIC), 'ktwo%09d-c%02d_spd-targ.fits.gz'
% (EPIC, campaign))
if clobber or not os.path.exists(tpf):
kplr_client.k2_star(EPIC).get_target_pixel_files(fetch=True)
with pyfits.open(tpf) as f:
qdata = f[1].data
# Get the TPF aperture
tpf_aperture = (f[2].data & 2) // 2
# Get the enlarged TPF aperture
tpf_big_aperture = np.array(tpf_aperture)
for i in range(tpf_big_aperture.shape[0]):
for j in range(tpf_big_aperture.shape[1]):
if f[2].data[i][j] == 1:
for n in [(i - 1, j), (i + 1, j),
(i, j - 1), (i, j + 1)]:
if n[0] >= 0 and n[0] < tpf_big_aperture.shape[0]:
if n[1] >= 0 and n[1] < \
tpf_big_aperture.shape[1]:
if tpf_aperture[n[0]][n[1]] == 1:
tpf_big_aperture[i][j] = 1
# Is there short cadence data?
if short_cadence:
with pyfits.open(sc_tpf) as f:
sc_qdata = f[1].data
# Get K2SFF apertures
try:
k2sff = kplr.K2SFF(EPIC, sci_campaign=campaign)
k2sff_apertures = k2sff.apertures
if delete_raw:
os.remove(k2sff._file)
except:
k2sff_apertures = [None for i in range(20)]
# Make a dict of all our apertures
# We're not getting K2SFF apertures 0-9 any more
apertures = {'tpf': tpf_aperture, 'tpf_big': tpf_big_aperture}
for i in range(10, 20):
apertures.update({'k2sff_%02d' % i: k2sff_apertures[i]})
# Get the header info
fitsheader = [pyfits.getheader(tpf, 0).cards,
pyfits.getheader(tpf, 1).cards,
pyfits.getheader(tpf, 2).cards]
if short_cadence:
sc_fitsheader = [pyfits.getheader(sc_tpf, 0).cards,
pyfits.getheader(sc_tpf, 1).cards,
pyfits.getheader(sc_tpf, 2).cards]
else:
sc_fitsheader = None
# Get a hi res image of the target
if get_hires:
hires = GetHiResImage(EPIC)
else:
hires = None
# Get nearby sources
if get_nearby:
nearby = GetSources(EPIC)
else:
nearby = []
# Delete?
if delete_raw:
os.remove(tpf)
if short_cadence:
os.remove(sc_tpf)
# Get the arrays
cadn = np.array(qdata.field('CADENCENO'), dtype='int32')
time = np.array(qdata.field('TIME'), dtype='float64')
fpix = np.array(qdata.field('FLUX'), dtype='float64')
fpix_err = np.array(qdata.field('FLUX_ERR'), dtype='float64')
qual = np.array(qdata.field('QUALITY'), dtype=int)
# Get rid of NaNs in the time array by interpolating
naninds = np.where(np.isnan(time))
time = Interpolate(np.arange(0, len(time)), naninds, time)
# Get the motion vectors (if available!)
pc1 = np.array(qdata.field('POS_CORR1'), dtype='float64')
pc2 = np.array(qdata.field('POS_CORR2'), dtype='float64')
if not np.all(np.isnan(pc1)) and not np.all(np.isnan(pc2)):
pc1 = Interpolate(time, np.where(np.isnan(pc1)), pc1)
pc2 = Interpolate(time, np.where(np.isnan(pc2)), pc2)
else:
pc1 = None
pc2 = None
# Do the same for short cadence
if short_cadence:
sc_cadn = np.array(sc_qdata.field('CADENCENO'), dtype='int32')
sc_time = np.array(sc_qdata.field('TIME'), dtype='float64')
sc_fpix = np.array(sc_qdata.field('FLUX'), dtype='float64')
sc_fpix_err = np.array(sc_qdata.field('FLUX_ERR'), dtype='float64')
sc_qual = np.array(sc_qdata.field('QUALITY'), dtype=int)
sc_naninds = np.where(np.isnan(sc_time))
sc_time = Interpolate(
np.arange(0, len(sc_time)), sc_naninds, sc_time)
sc_pc1 = np.array(sc_qdata.field('POS_CORR1'), dtype='float64')
sc_pc2 = np.array(sc_qdata.field('POS_CORR2'), dtype='float64')
if not np.all(np.isnan(sc_pc1)) and not np.all(np.isnan(sc_pc2)):
sc_pc1 = Interpolate(
sc_time, np.where(np.isnan(sc_pc1)), sc_pc1)
sc_pc2 = Interpolate(
sc_time, np.where(np.isnan(sc_pc2)), sc_pc2)
else:
sc_pc1 = None
sc_pc2 = None
else:
sc_cadn = None
sc_time = None
sc_fpix = None
sc_fpix_err = None
sc_qual = None
sc_pc1 = None
sc_pc2 = None
# Static pixel images for plotting
pixel_images = [fpix[0], fpix[len(fpix) // 2], fpix[len(fpix) - 1]]
# Atomically write to disk.
# http://stackoverflow.com/questions/2333872/
# atomic-writing-to-file-with-python
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
f = NamedTemporaryFile("wb", delete=False)
np.savez_compressed(f, cadn=cadn, time=time, fpix=fpix,
fpix_err=fpix_err,
qual=qual, apertures=apertures,
pc1=pc1, pc2=pc2, fitsheader=fitsheader,
pixel_images=pixel_images, nearby=nearby,
hires=hires,
sc_cadn=sc_cadn, sc_time=sc_time, sc_fpix=sc_fpix,
sc_fpix_err=sc_fpix_err, sc_qual=sc_qual,
sc_pc1=sc_pc1, sc_pc2=sc_pc2,
sc_fitsheader=sc_fitsheader)
f.flush()
os.fsync(f.fileno())
f.close()
shutil.move(f.name, filename)
if download_only:
return
# Load
data = np.load(filename)
apertures = data['apertures'][()]
pixel_images = data['pixel_images']
nearby = data['nearby']
hires = data['hires'][()]
if cadence == 'lc':
fitsheader = data['fitsheader']
cadn = data['cadn']
time = data['time']
fpix = data['fpix']
fpix_err = data['fpix_err']
qual = data['qual']
pc1 = data['pc1']
pc2 = data['pc2']
elif cadence == 'sc':
fitsheader = data['sc_fitsheader']
cadn = data['sc_cadn']
time = data['sc_time']
fpix = data['sc_fpix']
fpix_err = data['sc_fpix_err']
qual = data['sc_qual']
pc1 = data['sc_pc1']
pc2 = data['sc_pc2']
else:
raise ValueError("Invalid value for the cadence.")
# Select the "saturated aperture" to check if the star is saturated
# If it is, we will use this aperture instead
if saturated_aperture_name == 'custom':
saturated_aperture = GetCustomAperture(data)
else:
if saturated_aperture_name is None:
saturated_aperture_name = 'k2sff_19'
saturated_aperture = apertures[saturated_aperture_name]
if saturated_aperture is None:
log.error("Invalid aperture selected. Defaulting to `tpf_big`.")
saturated_aperture_name = 'tpf_big'
saturated_aperture = apertures[saturated_aperture_name]
# HACK: Some C05 K2SFF apertures don't match the target pixel file
# pixel grid size. This is likely because they're defined on the M67
# superstamp. For now, let's ignore these stars.
if saturated_aperture.shape != fpix.shape[1:]:
log.error("Aperture size mismatch!")
return None
# Compute the saturation flux and the 97.5th percentile
# flux in each pixel of the saturated aperture. We're going
# to compare these to decide if the star is saturated.
satflx = SaturationFlux(EPIC, campaign=campaign) * \
(1. + saturation_tolerance)
f97 = np.zeros((fpix.shape[1], fpix.shape[2]))
for i in range(fpix.shape[1]):
for j in range(fpix.shape[2]):
if saturated_aperture[i, j]:
# Let's remove NaNs...
tmp = np.delete(fpix[:, i, j], np.where(
np.isnan(fpix[:, i, j])))
# ... and really bad outliers...
if len(tmp):
f = SavGol(tmp)
med = np.nanmedian(f)
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
bad = np.where((f > med + 10. * MAD) |
(f < med - 10. * MAD))[0]
                        tmp = np.delete(tmp, bad)
# ... so we can compute the 97.5th percentile flux
i97 = int(0.975 * len(tmp))
tmp = tmp[np.argsort(tmp)[i97]]
f97[i, j] = tmp
# Check if any of the pixels are actually saturated
if np.nanmax(f97) <= satflx:
log.info("No saturated columns detected.")
saturated = False
else:
log.info("Saturated pixel(s) found. Switching to aperture `%s`." %
saturated_aperture_name)
aperture_name = saturated_aperture_name
saturated = True
# Now grab the aperture we'll actually use
if aperture_name == 'custom':
aperture = GetCustomAperture(data)
else:
if aperture_name is None:
aperture_name = 'k2sff_15'
aperture = apertures[aperture_name]
if aperture is None:
log.error("Invalid aperture selected. Defaulting to `tpf_big`.")
aperture_name = 'tpf_big'
aperture = apertures[aperture_name]
# HACK: Some C05 K2SFF apertures don't match the target pixel file
# pixel grid size. This is likely because they're defined on the M67
# superstamp. For now, let's ignore these stars.
if aperture.shape != fpix.shape[1:]:
log.error("Aperture size mismatch!")
return None
# Now we check if the aperture is too big. Can lead to memory errors...
# Treat saturated and unsaturated stars differently.
if saturated:
# Need to check if we have too many pixels *after* collapsing columns.
# Sort the apertures in decreasing order of pixels, but keep the apert.
# chosen by the user first.
aperture_names = np.array(list(apertures.keys()))
npix_per_aperture = np.array(
[np.sum(apertures[k]) for k in aperture_names])
aperture_names = aperture_names[np.argsort(npix_per_aperture)[::-1]]
aperture_names = np.append([aperture_name], np.delete(
aperture_names, np.argmax(aperture_names == aperture_name)))
# Loop through them. Pick the first one that satisfies
# the `max_pixels` constraint
for aperture_name in aperture_names:
aperture = apertures[aperture_name]
aperture[np.isnan(fpix[0])] = 0
ncol = 0
apcopy = np.array(aperture)
for j in range(apcopy.shape[1]):
if np.any(f97[:, j] > satflx):
apcopy[:, j] = 0
ncol += 1
if np.sum(apcopy) + ncol <= max_pixels:
break
if np.sum(apcopy) + ncol > max_pixels:
log.error(
"No apertures available with fewer than %d pixels. Aborting."
% max_pixels)
return None
# Now, finally, we collapse the saturated columns into single pixels
# and make the pixel array 2D
ncol = 0
fpixnew = []
ferrnew = []
# HACK: K2SFF sometimes clips the heads/tails of saturated columns
# That's really bad, since that's where all the information is. Let's
# artificially extend the aperture by two pixels at the top and bottom
# of each saturated column. This *could* increase contamination, but
# it's unlikely since the saturated target is by definition really
# bright
ext = 0
for j in range(aperture.shape[1]):
if np.any(f97[:, j] > satflx):
for i in range(aperture.shape[0]):
if (aperture[i, j] == 0) and \
(np.nanmedian(fpix[:, i, j]) > 0):
if (i + 2 < aperture.shape[0]) and \
aperture[i + 2, j] == 1:
aperture[i, j] = 2
ext += 1
elif (i + 1 < aperture.shape[0]) and \
aperture[i + 1, j] == 1:
aperture[i, j] = 2
ext += 1
elif (i - 1 >= 0) and aperture[i - 1, j] == 1:
aperture[i, j] = 2
ext += 1
elif (i - 2 >= 0) and aperture[i - 2, j] == 1:
aperture[i, j] = 2
ext += 1
if ext:
log.info("Extended saturated columns by %d pixel(s)." % ext)
for j in range(aperture.shape[1]):
if np.any(f97[:, j] > satflx):
marked = False
collapsed = np.zeros(len(fpix[:, 0, 0]))
collapsed_err2 = np.zeros(len(fpix[:, 0, 0]))
for i in range(aperture.shape[0]):
if aperture[i, j]:
if not marked:
aperture[i, j] = AP_COLLAPSED_PIXEL
marked = True
else:
aperture[i, j] = AP_SATURATED_PIXEL
collapsed += fpix[:, i, j]
collapsed_err2 += fpix_err[:, i, j] ** 2
if np.any(collapsed):
fpixnew.append(collapsed)
ferrnew.append(np.sqrt(collapsed_err2))
ncol += 1
else:
for i in range(aperture.shape[0]):
if aperture[i, j]:
fpixnew.append(fpix[:, i, j])
ferrnew.append(fpix_err[:, i, j])
fpix2D = np.array(fpixnew).T
fpix_err2D = np.array(ferrnew).T
log.info("Collapsed %d saturated column(s)." % ncol)
else:
# Check if there are too many pixels
if np.sum(aperture) > max_pixels:
# This case is simpler: we just pick the largest aperture
# that's less than or equal to `max_pixels`
keys = list(apertures.keys())
npix = np.array([np.sum(apertures[k]) for k in keys])
aperture_name = keys[np.argmax(npix * (npix <= max_pixels))]
aperture = apertures[aperture_name]
aperture[np.isnan(fpix[0])] = 0
if np.sum(aperture) > max_pixels:
log.error("No apertures available with fewer than " +
"%d pixels. Aborting." % max_pixels)
return None
log.warn(
"Selected aperture is too big. Proceeding with aperture " +
"`%s` instead." % aperture_name)
# Make the pixel flux array 2D
aperture[np.isnan(fpix[0])] = 0
ap = np.where(aperture & 1)
fpix2D = np.array([f[ap] for f in fpix], dtype='float64')
fpix_err2D = np.array([p[ap] for p in fpix_err], dtype='float64')
# Compute the background
binds = np.where(aperture ^ 1)
if RemoveBackground(EPIC, campaign=campaign) and (len(binds[0]) > 0):
bkg = np.nanmedian(np.array([f[binds]
for f in fpix], dtype='float64'), axis=1)
# Uncertainty of the median:
# http://davidmlane.com/hyperstat/A106993.html
bkg_err = 1.253 * np.nanmedian(np.array([e[binds] for e in fpix_err],
dtype='float64'), axis=1) \
/ np.sqrt(len(binds[0]))
bkg = bkg.reshape(-1, 1)
bkg_err = bkg_err.reshape(-1, 1)
else:
bkg = 0.
bkg_err = 0.
# Make everything 2D and remove the background
fpix = fpix2D - bkg
fpix_err = np.sqrt(fpix_err2D ** 2 + bkg_err ** 2)
flux = np.sum(fpix, axis=1)
ferr = np.sqrt(np.sum(fpix_err ** 2, axis=1))
# Get NaN data points
nanmask = np.where(np.isnan(flux) | (flux == 0))[0]
# Get flagged data points -- we won't train our model on them
badmask = []
for b in bad_bits:
badmask += list(np.where(qual & 2 ** (b - 1))[0])
# Flag >10 sigma outliers -- same thing.
tmpmask = np.array(list(set(np.concatenate([badmask, nanmask]))))
t = np.delete(time, tmpmask)
f = np.delete(flux, tmpmask)
f = SavGol(f)
med = np.nanmedian(f)
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
bad = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0]
badmask.extend([np.argmax(time == t[i]) for i in bad])
# Campaign 2 hack: the first day or two are screwed up
if campaign == 2:
badmask.extend(np.where(time < 2061.5)[0])
# TODO: Fix time offsets in first half of
# Campaign 0. See note in everest 1.0 code
# Finalize the mask
badmask = np.array(sorted(list(set(badmask))))
# Interpolate the nans
fpix = Interpolate(time, nanmask, fpix)
fpix_err = Interpolate(time, nanmask, fpix_err)
# Return
data = DataContainer()
data.ID = EPIC
data.campaign = campaign
data.cadn = cadn
data.time = time
data.fpix = fpix
data.fpix_err = fpix_err
data.nanmask = nanmask
data.badmask = badmask
data.aperture = aperture
data.aperture_name = aperture_name
data.apertures = apertures
data.quality = qual
data.Xpos = pc1
data.Ypos = pc2
data.meta = fitsheader
data.mag = fitsheader[0]['KEPMAG'][1]
data.pixel_images = pixel_images
data.nearby = nearby
data.hires = hires
data.saturated = saturated
data.bkg = bkg
return data |
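The 10-sigma outlier clipping above relies on the median absolute deviation scaled by 1.4826 as a robust stand-in for the standard deviation. A minimal, self-contained sketch of that step (the function name and threshold argument are illustrative, not part of this pipeline):

import numpy as np

def mad_clip_indices(flux, nsig=10.0):
    """Indices of points more than `nsig` robust sigmas from the median."""
    med = np.nanmedian(flux)
    sigma = 1.4826 * np.nanmedian(np.abs(flux - med))  # MAD -> Gaussian sigma
    return np.where((flux > med + nsig * sigma) | (flux < med - nsig * sigma))[0]

# e.g. flux = np.random.randn(1000); flux[42] = 50.0
# mad_clip_indices(flux) would flag index 42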
python | def setup_session(endpoint_context, areq, uid, client_id='', acr='', salt='salt',
authn_event=None):
"""
    Set up a user session.
:param endpoint_context:
:param areq:
:param uid:
:param acr:
:param client_id:
:param salt:
    :param authn_event: An already constructed AuthnEvent
    :return: The session identifier (sid)
"""
if authn_event is None and acr:
authn_event = AuthnEvent(uid=uid, salt=salt, authn_info=acr,
authn_time=time.time())
if not client_id:
client_id = areq['client_id']
sid = endpoint_context.sdb.create_authz_session(authn_event, areq,
client_id=client_id,
uid=uid)
endpoint_context.sdb.do_sub(sid, uid, '')
return sid |
java | public int add(Object label) {
ensureCapacity(used);
states[used] = new State(label);
return used++;
} |
python | def scores_to_probs(scores, proba, eps=0.01):
"""Transforms scores to probabilities by applying the logistic function"""
if np.any(~proba):
# Need to convert some of the scores into probabilities
probs = copy.deepcopy(scores)
n_class = len(proba)
for m in range(n_class):
if not proba[m]:
#TODO: incorporate threshold (currently assuming zero)
# find most extreme absolute score
max_extreme_score = max(np.abs(np.min(scores[:,m])),\
np.abs(np.max(scores[:,m])))
k = np.log((1-eps)/eps)/max_extreme_score # scale factor
                probs[:,m] = expit(k * scores[:,m])
return probs
else:
return scores |
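A small worked example of the mapping above (the imports are assumptions about the enclosing module; `expit` is `scipy.special.expit`). With `eps=0.01`, the scale factor `k = log((1-eps)/eps) / max|score|` sends the most extreme score in a column to `1 - eps`:

import numpy as np
from scipy.special import expit  # logistic function used by scores_to_probs

scores = np.array([[ 2.0, 0.9],
                   [-1.0, 0.2],
                   [ 0.5, 0.6]])
proba = np.array([False, True])  # column 0 holds raw scores, column 1 already holds probabilities
probs = scores_to_probs(scores, proba, eps=0.01)
# k = log(0.99/0.01) / 2.0 ~ 2.298, so probs[0, 0] = expit(2.298 * 2.0) ~ 0.99;
# column 1 is returned unchanged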
java | @Override
public void layoutViews(RecyclerView.Recycler recycler, RecyclerView.State state,
VirtualLayoutManager.LayoutStateWrapper layoutState, LayoutChunkResult result,
LayoutManagerHelper helper) {
// reach the end of this layout
if (isOutOfRange(layoutState.getCurrentPosition())) {
return;
}
int currentPosition = layoutState.getCurrentPosition();
// find corresponding layout container
View view = nextView(recycler, layoutState, helper, result);
if (view == null) {
return;
}
final boolean isOverLapMargin = helper.isEnableMarginOverLap();
VirtualLayoutManager.LayoutParams params = (VirtualLayoutManager.LayoutParams) view.getLayoutParams();
final boolean layoutInVertical = helper.getOrientation() == VERTICAL;
int startSpace = 0, endSpace = 0, gap = 0;
boolean isLayoutEnd = layoutState.getLayoutDirection() == VirtualLayoutManager.LayoutStateWrapper.LAYOUT_END;
boolean isStartLine = isLayoutEnd
? currentPosition == getRange().getLower().intValue()
: currentPosition == getRange().getUpper().intValue();
boolean isEndLine = isLayoutEnd
? currentPosition == getRange().getUpper().intValue()
: currentPosition == getRange().getLower().intValue();
if (isStartLine) {
startSpace = computeStartSpace(helper, layoutInVertical, isLayoutEnd, isOverLapMargin);
}
if (isEndLine) {
endSpace = computeEndSpace(helper, layoutInVertical, isLayoutEnd, isOverLapMargin);
}
if (!isStartLine) {
if (!isOverLapMargin) {
gap = mLayoutWithAnchor ? 0 : mDividerHeight;
} else {
//TODO check layout with anchor
if (isLayoutEnd) {
int marginTop = params.topMargin;
View sibling = helper.findViewByPosition(currentPosition - 1);
int lastMarginBottom = sibling != null ? ((LayoutParams) sibling.getLayoutParams()).bottomMargin : 0;
if (lastMarginBottom >= 0 && marginTop >= 0) {
gap = Math.max(lastMarginBottom, marginTop);
} else {
gap = lastMarginBottom + marginTop;
}
} else {
int marginBottom = params.bottomMargin;
View sibling = helper.findViewByPosition(currentPosition + 1);
int lastMarginTop = sibling != null ? ((LayoutParams) sibling.getLayoutParams()).topMargin : 0;
if (marginBottom >= 0 && lastMarginTop >= 0) {
gap = Math.max(marginBottom, lastMarginTop);
} else {
gap = marginBottom + lastMarginTop;
}
}
}
}
final int widthSize = helper.getContentWidth() - helper.getPaddingLeft() - helper
.getPaddingRight() - getHorizontalMargin() - getHorizontalPadding();
int widthSpec = helper.getChildMeasureSpec(widthSize, params.width, !layoutInVertical);
int heightSpec;
float viewAspectRatio = params.mAspectRatio;
if (!Float.isNaN(viewAspectRatio) && viewAspectRatio > 0) {
heightSpec = View.MeasureSpec.makeMeasureSpec((int) (widthSize / viewAspectRatio + 0.5f),
View.MeasureSpec.EXACTLY);
} else if (!Float.isNaN(mAspectRatio) && mAspectRatio > 0) {
heightSpec = View.MeasureSpec.makeMeasureSpec((int) (widthSize / mAspectRatio + 0.5),
View.MeasureSpec.EXACTLY);
} else {
heightSpec = helper.getChildMeasureSpec(
helper.getContentHeight() - helper.getPaddingTop() - helper.getPaddingBottom()
- getVerticalMargin() - getVerticalPadding(), params.height,
layoutInVertical);
}
if (!isOverLapMargin) {
helper.measureChildWithMargins(view, widthSpec, heightSpec);
} else {
helper.measureChild(view, widthSpec, heightSpec);
}
OrientationHelperEx orientationHelper = helper.getMainOrientationHelper();
result.mConsumed = orientationHelper.getDecoratedMeasurement(view) + startSpace + endSpace + gap;
int left, top, right, bottom;
if (helper.getOrientation() == VERTICAL) {
// not support RTL now
if (helper.isDoLayoutRTL()) {
right = helper.getContentWidth() - helper.getPaddingRight() - mMarginRight - mPaddingRight;
left = right - orientationHelper.getDecoratedMeasurementInOther(view);
} else {
left = helper.getPaddingLeft() + mMarginLeft + mPaddingLeft;
right = left + orientationHelper.getDecoratedMeasurementInOther(view);
}
// whether this layout pass is layout to start or to end
if (layoutState.getLayoutDirection() == VirtualLayoutManager.LayoutStateWrapper.LAYOUT_START) {
// fill start, from bottom to top
bottom = layoutState.getOffset() - startSpace - (isStartLine ? 0 : gap);
top = bottom - orientationHelper.getDecoratedMeasurement(view);
} else {
// fill end, from top to bottom
top = layoutState.getOffset() + startSpace + (isStartLine ? 0 : gap);
bottom = top + orientationHelper.getDecoratedMeasurement(view);
}
} else {
top = helper.getPaddingTop() + mMarginTop + mPaddingTop;
bottom = top + orientationHelper.getDecoratedMeasurementInOther(view);
if (layoutState.getLayoutDirection() == VirtualLayoutManager.LayoutStateWrapper.LAYOUT_START) {
// fill left, from right to left
right = layoutState.getOffset() - startSpace - (isStartLine ? 0 : gap);
left = right - orientationHelper.getDecoratedMeasurement(view);
} else {
// fill right, from left to right
left = layoutState.getOffset() + startSpace + (isStartLine ? 0 : gap);
right = left + orientationHelper.getDecoratedMeasurement(view);
}
}
// We calculate everything with View's bounding box (which includes decor and margins)
// To calculate correct layout position, we subtract margins.
layoutChildWithMargin(view, left, top, right, bottom, helper);
if (DEBUG) {
Log.d(TAG, "laid out child at position " + helper.getPosition(view) + ", with l:"
+ (left + params.leftMargin) + ", t:" + (top + params.topMargin) + ", r:"
+ (right - params.rightMargin) + ", b:" + (bottom - params.bottomMargin));
}
handleStateOnResult(result, view);
mLayoutWithAnchor = false;
} |
java | public CertificateDescriptionInner verify(String resourceGroupName, String resourceName, String certificateName, String ifMatch) {
return verifyWithServiceResponseAsync(resourceGroupName, resourceName, certificateName, ifMatch).toBlocking().single().body();
} |
python | def ρ(self, e):
"""Density of states.
:param e: Energy :math:`E`.
:type e: float
:return: :math:`ρ(E) = \\left|k'(E) k(E)\\right|`
:rtype: float
"""
d = self._dichalcogenide
at, λ = d.at, d.λ
return abs(2 * e - λ) * (2 * at**2)**(-1) |
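The returned expression is consistent with the docstring relation ρ(E) = |k'(E) k(E)| under the assumption (the dispersion itself is defined elsewhere in the class) that the band satisfies (a_t k)^2 = E^2 - λE, i.e. E = λ/2 ± sqrt((a_t k)^2 + λ^2/4):

\[
(a_t k)^2 = E^2 - \lambda E
\;\Rightarrow\;
2 a_t^2\, k\, k' = 2E - \lambda
\;\Rightarrow\;
\rho(E) = \left| k\, k' \right| = \frac{|2E - \lambda|}{2 a_t^2},
\]

which is exactly abs(2 * e - λ) * (2 * at**2)**(-1).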
python | def get_block_transactions(
self,
header: BlockHeader,
transaction_class: Type['BaseTransaction']) -> Iterable['BaseTransaction']:
"""
        Returns an iterable of transactions for the block specified by the
given block header.
"""
return self._get_block_transactions(header.transaction_root, transaction_class) |
python | def get_instances_with_configs(configs):
"""Create AndroidDevice instances from a list of dict configs.
Each config should have the required key-value pair 'serial'.
Args:
configs: A list of dicts each representing the configuration of one
android device.
Returns:
A list of AndroidDevice objects.
"""
results = []
for c in configs:
try:
serial = c.pop('serial')
except KeyError:
raise Error(
'Required value "serial" is missing in AndroidDevice config %s.'
% c)
is_required = c.get(KEY_DEVICE_REQUIRED, True)
try:
ad = AndroidDevice(serial)
ad.load_config(c)
except Exception:
if is_required:
raise
ad.log.exception('Skipping this optional device due to error.')
continue
results.append(ad)
return results |
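A hedged sketch of the config shape the helper above expects; the serials are placeholders and the 'required' key name is an assumption for KEY_DEVICE_REQUIRED:

configs = [
    {'serial': 'emulator-5554'},
    {'serial': '0123456789ABCDEF', 'required': False},  # optional device: skipped if it fails to load
]
ads = get_instances_with_configs(configs)  # list of AndroidDevice objects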
python | def _app_base_start(self, option: str, args: list or tuple) -> None:
'''
Args:
option:
-a <ACTION>
-c <CATEGORY>
-n <COMPONENT>
'''
_, error = self._execute('-s', self.device_sn,
'shell', 'am', 'start', option, *args)
if error and error.startswith('Error'):
raise ApplicationsException(error.split(':', 1)[-1].strip()) |
python | def configure_engine(self):
"""
        Configure the database connection.
        Sets the appropriate transaction isolation level and handles errors.
Returns:
True, if we did not encounter any unrecoverable errors, else False.
"""
try:
self.connection.execution_options(isolation_level="SERIALIZABLE")
except sa.exc.ArgumentError:
LOG.debug("Unable to set isolation level to SERIALIZABLE")
return True |
java | public DrawerView addFixedItem(DrawerItem item) {
if (item.getId() <= 0) {
item.setId(System.nanoTime() * 100 + Math.round(Math.random() * 100));
}
for (DrawerItem oldItem : mAdapterFixed.getItems()) {
if (oldItem.getId() == item.getId()) {
mAdapterFixed.remove(oldItem);
break;
}
}
item.attachTo(mAdapterFixed);
mAdapterFixed.add(item);
updateFixedList();
return this;
} |
python | def get_share_file (filename, devel_dir=None):
"""Return a filename in the share directory.
@param devel_dir: directory to search when developing
@ptype devel_dir: string
@param filename: filename to search for
@ptype filename: string
@return: the found filename or None
@rtype: string
@raises: ValueError if not found
"""
paths = [get_share_dir()]
if devel_dir is not None:
# when developing
paths.insert(0, devel_dir)
for path in paths:
fullpath = os.path.join(path, filename)
if os.path.isfile(fullpath):
return fullpath
# not found
msg = "%s not found in %s; check your installation" % (filename, paths)
raise ValueError(msg) |
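A brief usage sketch for the helper above; the file and directory names are placeholders:

try:
    path = get_share_file('logging.conf', devel_dir='./share')
except ValueError:
    path = None  # fall back gracefully if the file is not installed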
java | private Node newBranchInstrumentationNode(NodeTraversal traversal, Node node, int idx) {
String arrayName = createArrayName(traversal);
// Create instrumentation Node
Node getElemNode = IR.getelem(IR.name(arrayName), IR.number(idx)); // Make line number 0-based
Node exprNode = IR.exprResult(IR.assign(getElemNode, IR.trueNode()));
// Note line as instrumented
String fileName = traversal.getSourceName();
if (!instrumentationData.containsKey(fileName)) {
instrumentationData.put(fileName, new FileInstrumentationData(fileName, arrayName));
}
return exprNode.useSourceInfoIfMissingFromForTree(node);
} |
python | def set_position(self, x, y, speed=None):
''' Move chuck to absolute position in um'''
if speed:
self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y %d' % (x, y, speed))
else:
self._intf.write('MoveChuckSubsite %1.1f %1.1f R Y' % (x, y)) |
python | def AgregarOperador(self, cuit, iibb=None, nro_ruca=None, nro_renspa=None,
cuit_autorizado=None, **kwargs):
"Agrego los datos del operador a la liq."
d = {'cuit': cuit,
'iibb': iibb,
'nroRUCA': nro_ruca,
'nroRenspa': nro_renspa,
'cuitAutorizado': cuit_autorizado}
self.solicitud['receptor']['operador'] = d
return True |
python | async def _on_response_prepare(self,
request: web.Request,
response: web.StreamResponse):
"""Non-preflight CORS request response processor.
If request is done on CORS-enabled route, process request parameters
and set appropriate CORS response headers.
"""
if (not self._router_adapter.is_cors_enabled_on_request(request) or
self._router_adapter.is_preflight_request(request)):
# Either not CORS enabled route, or preflight request which is
# handled in its own handler.
return
# Processing response of non-preflight CORS-enabled request.
config = self._router_adapter.get_non_preflight_request_config(request)
# Handle according to part 6.1 of the CORS specification.
origin = request.headers.get(hdrs.ORIGIN)
if origin is None:
# Terminate CORS according to CORS 6.1.1.
return
options = config.get(origin, config.get("*"))
if options is None:
# Terminate CORS according to CORS 6.1.2.
return
assert hdrs.ACCESS_CONTROL_ALLOW_ORIGIN not in response.headers
assert hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS not in response.headers
assert hdrs.ACCESS_CONTROL_EXPOSE_HEADERS not in response.headers
# Process according to CORS 6.1.4.
# Set exposed headers (server headers exposed to client) before
# setting any other headers.
if options.expose_headers == "*":
# Expose all headers that are set in response.
exposed_headers = \
frozenset(response.headers.keys()) - _SIMPLE_RESPONSE_HEADERS
response.headers[hdrs.ACCESS_CONTROL_EXPOSE_HEADERS] = \
",".join(exposed_headers)
elif options.expose_headers:
# Expose predefined list of headers.
response.headers[hdrs.ACCESS_CONTROL_EXPOSE_HEADERS] = \
",".join(options.expose_headers)
# Process according to CORS 6.1.3.
# Set allowed origin.
response.headers[hdrs.ACCESS_CONTROL_ALLOW_ORIGIN] = origin
if options.allow_credentials:
# Set allowed credentials.
response.headers[hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS] = _TRUE |
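The coroutine above is an internal response hook of aiohttp-cors; it is normally reached by wiring CORS onto an application roughly as follows (API names recalled from the aiohttp-cors README, so treat them as assumptions):

from aiohttp import web
import aiohttp_cors

async def hello(request):
    return web.Response(text="ok")

app = web.Application()
cors = aiohttp_cors.setup(app, defaults={
    "*": aiohttp_cors.ResourceOptions(
        allow_credentials=True,
        expose_headers="*",
        allow_headers="*",
    )
})
route = app.router.add_get("/hello", hello)
cors.add(route)  # responses on this route now pass through the handler above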
java | public static List parseAppliesTo(String appliesTo) {
List matches = new ArrayList();
if (appliesTo != null) {
boolean quoted = false;
int index = 0;
ProductMatch match = new ProductMatch();
for (int i = 0; i < appliesTo.length(); i++) {
char c = appliesTo.charAt(i);
if (c == '"') {
quoted = !quoted;
}
if (!quoted) {
if (c == ',') {
match.add(appliesTo.substring(index, i));
index = i + 1;
matches.add(match);
match = new ProductMatch();
} else if (c == ';') {
match.add(appliesTo.substring(index, i));
index = i + 1;
}
}
}
match.add(appliesTo.substring(index));
matches.add(match);
}
return matches;
} |
python | def postprocess(self, json_string):
"""Displays each entry on its own line."""
is_compressing, is_hash, compressed, spaces = False, False, [], 0
for row in json_string.split('\n'):
if is_compressing:
if (row[:spaces + 5] == ' ' * (spaces + 4) +
('"' if is_hash else '{')):
compressed.append(row.rstrip())
elif (len(row) > spaces and row[:spaces] == ' ' * spaces and
                    re.match(r'[\]\}],?', row[spaces:].rstrip())):
compressed.append(row.rstrip())
is_compressing = False
else:
compressed[-1] += ' ' + row.strip()
else:
compressed.append(row.rstrip())
if any(a in row for a in ['edges', 'nodes']):
# Fix to handle issues that arise with empty lists
if '[]' in row:
continue
spaces = sum(1 for _ in takewhile(str.isspace, row))
is_compressing, is_hash = True, '{' in row
return '\n'.join(compressed) |
python | def filter(self, source_file, encoding): # noqa A001
"""Parse XML file."""
sources = []
for content, filename, enc in self.get_content(source_file):
self.additional_context = self.get_context(filename)
sources.extend(self._filter(content, source_file, enc))
return sources |
python | def store(self, name=None):
"""
Get a cache store instance by name.
:param name: The cache store name
:type name: str
:rtype: Repository
"""
if name is None:
name = self.get_default_driver()
self._stores[name] = self._get(name)
return self._stores[name] |
python | def decrypt_data(self, name, ciphertext, context="", nonce="", batch_input=None, mount_point=DEFAULT_MOUNT_POINT):
"""Decrypt the provided ciphertext using the named key.
Supported methods:
POST: /{mount_point}/decrypt/{name}. Produces: 200 application/json
:param name: Specifies the name of the encryption key to decrypt against. This is specified as part of the URL.
:type name: str | unicode
:param ciphertext: the ciphertext to decrypt.
:type ciphertext: str | unicode
:param context: Specifies the base64 encoded context for key derivation. This is required if key derivation is
enabled.
:type context: str | unicode
:param nonce: Specifies a base64 encoded nonce value used during encryption. Must be provided if convergent
encryption is enabled for this key and the key was generated with Vault 0.6.1. Not required for keys created
in 0.6.2+.
:type nonce: str | unicode
:param batch_input: Specifies a list of items to be decrypted in a single batch. When this parameter is set, if
the parameters 'ciphertext', 'context' and 'nonce' are also set, they will be ignored. Format for the input
goes like this: [dict(context="b64_context", ciphertext="b64_plaintext"), ...]
:type batch_input: List[dict]
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the request.
:rtype: requests.Response
"""
params = {
'ciphertext': ciphertext,
'context': context,
'nonce': nonce,
'batch_input': batch_input,
}
api_path = '/v1/{mount_point}/decrypt/{name}'.format(
mount_point=mount_point,
name=name,
)
response = self._adapter.post(
url=api_path,
json=params,
)
return response.json() |
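A hedged usage sketch, assuming the method above is exposed through hvac's client.secrets.transit accessor (URL, token and ciphertext are placeholders):

import hvac

client = hvac.Client(url='https://vault.example.com:8200', token='s.placeholder')
resp = client.secrets.transit.decrypt_data(
    name='my-key',
    ciphertext='vault:v1:AbCdEf...',
)
plaintext_b64 = resp['data']['plaintext']  # plaintext is returned base64-encoded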
java | public List<TargetRelationship> getLinkListLevelRelationships() {
final ArrayList<TargetRelationship> relationships = new ArrayList<TargetRelationship>();
for (final TargetRelationship relationship : levelRelationships) {
if (relationship.getType() == RelationshipType.LINKLIST) {
relationships.add(relationship);
}
}
return relationships;
} |
python | def terminate(self, reboot=False):
"""Delete VIOM configuration from iRMC."""
self.root.manage.manage = False
self.root.mode = 'delete'
self.root.init_boot = reboot
self.client.set_profile(self.root.get_json()) |
java | public MetaMethod getMethod(String name, Class[] parameters) {
return impl.pickMethod(name, parameters);
} |
java | public OrderInner get(String deviceName, String resourceGroupName) {
return getWithServiceResponseAsync(deviceName, resourceGroupName).toBlocking().single().body();
} |
java | public final Post withContent(final String content) {
return new Post(id, slug, title, excerpt, content, authorId, author,
publishTimestamp, modifiedTimestamp, status, parentId,
guid, commentCount, metadata, type, mimeType, taxonomyTerms, children);
} |
java | public BlockInfo getBlockInfo(final long blockId) throws IOException {
return retryRPC(() -> {
return GrpcUtils.fromProto(
mClient.getBlockInfo(GetBlockInfoPRequest.newBuilder().setBlockId(blockId).build())
.getBlockInfo());
});
} |
python | def sample(self, qubits: List[ops.Qid], repetitions: int=1):
"""Samples from the wave function at this point in the computation.
Note that this does not collapse the wave function.
Returns:
Measurement results with True corresponding to the `|1>` state.
The outer list is for repetitions, and the inner corresponds to
measurements ordered by the supplied qubits.
"""
return self._stepper.sample_measurements(
indices=[self.qubit_map[q] for q in qubits],
repetitions=repetitions) |
python | def width_aware_splitlines(self, columns):
# type: (int) -> Iterator[FmtStr]
"""Split into lines, pushing doublewidth characters at the end of a line to the next line.
When a double-width character is pushed to the next line, a space is added to pad out the line.
"""
if columns < 2:
raise ValueError("Column width %s is too narrow." % columns)
if wcswidth(self.s) == -1:
raise ValueError('bad values for width aware slicing')
return self._width_aware_splitlines(columns) |
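A quick illustration, assuming the method above lives on curtsies' FmtStr and that the fmtstr constructor is importable from that package (treat both as assumptions):

from curtsies import fmtstr

lines = list(fmtstr('abc\u4f60\u597ddef').width_aware_splitlines(4))
# Each resulting line fits within 4 columns; a double-width character that would
# straddle the boundary is pushed to the next line and the gap padded with a space.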
java | public String convertMMORGLengthToString(EDataType eDataType, Object instanceValue) {
return instanceValue == null ? null : instanceValue.toString();
} |
python | def get_section_hdrgos(self):
"""Get the GO group headers explicitly listed in sections."""
return set([h for _, hs in self.sections for h in hs]) if self.sections else set() |
python | def init_model_gaussian1d(observations, nstates, reversible=True):
"""Generate an initial model with 1D-Gaussian output densities
Parameters
----------
observations : list of ndarray((T_i), dtype=float)
list of arrays of length T_i with observation data
nstates : int
The number of states.
Examples
--------
Generate initial model for a gaussian output model.
>>> from bhmm import testsystems
>>> [model, observations, states] = testsystems.generate_synthetic_observations(output='gaussian')
>>> initial_model = init_model_gaussian1d(observations, model.nstates)
"""
ntrajectories = len(observations)
# Concatenate all observations.
collected_observations = np.array([], dtype=config.dtype)
for o_t in observations:
collected_observations = np.append(collected_observations, o_t)
# Fit a Gaussian mixture model to obtain emission distributions and state stationary probabilities.
from bhmm._external.sklearn import mixture
gmm = mixture.GMM(n_components=nstates)
gmm.fit(collected_observations[:,None])
from bhmm import GaussianOutputModel
output_model = GaussianOutputModel(nstates, means=gmm.means_[:,0], sigmas=np.sqrt(gmm.covars_[:,0]))
logger().info("Gaussian output model:\n"+str(output_model))
# Extract stationary distributions.
Pi = np.zeros([nstates], np.float64)
Pi[:] = gmm.weights_[:]
logger().info("GMM weights: %s" % str(gmm.weights_))
# Compute fractional state memberships.
Nij = np.zeros([nstates, nstates], np.float64)
for o_t in observations:
# length of trajectory
T = o_t.shape[0]
# output probability
pobs = output_model.p_obs(o_t)
# normalize
pobs /= pobs.sum(axis=1)[:,None]
# Accumulate fractional transition counts from this trajectory.
for t in range(T-1):
Nij[:,:] = Nij[:,:] + np.outer(pobs[t,:], pobs[t+1,:])
logger().info("Nij\n"+str(Nij))
# Compute transition matrix maximum likelihood estimate.
import msmtools.estimation as msmest
import msmtools.analysis as msmana
Tij = msmest.transition_matrix(Nij, reversible=reversible)
pi = msmana.stationary_distribution(Tij)
# Update model.
model = HMM(pi, Tij, output_model)
return model |
java | @Override
public GetDistributionConfigResult getDistributionConfig(GetDistributionConfigRequest request) {
request = beforeClientExecution(request);
return executeGetDistributionConfig(request);
} |