language (stringclasses: 2 values) | func_code_string (stringlengths: 63 to 466k) |
---|---|
java | public static void initDefaultParameterValues(Connection conn, QueryParameter param,
Map<String, Object> parameterValues) throws QueryException {
List<Serializable> defValues;
if ((param.getDefaultValues() != null) && (param.getDefaultValues().size() > 0)) {
defValues = param.getDefaultValues();
} else {
try {
defValues = ParameterUtil.getDefaultSourceValues(conn, param);
} catch (Exception e) {
throw new QueryException(e);
}
}
initDefaultParameterValues(param, defValues, parameterValues);
} |
python | def write_message(msg, indent=False, mtype='standard', caption=False):
"""Writes message if verbose mode is set."""
if (mtype == 'debug' and config.DEBUG) or (mtype != 'debug' and config.VERBOSE) or mtype == 'error':
message(msg, indent=indent, mtype=mtype, caption=caption) |
python | def MI_modifyInstance(self,
env,
modifiedInstance,
previousInstance,
propertyList,
cimClass):
# pylint: disable=invalid-name
"""Modify a CIM instance
Implements the WBEM operation ModifyInstance in terms
of the set_instance method. A derived class will not normally
override this method.
"""
logger = env.get_logger()
logger.log_debug('CIMProvider MI_modifyInstance called...')
if propertyList is not None:
plist = [p.lower() for p in propertyList]
filter_instance(modifiedInstance, plist)
modifiedInstance.update(modifiedInstance.path)
self.set_instance(env=env,
instance=modifiedInstance,
previous_instance=previousInstance,
cim_class=cimClass)
logger.log_debug('CIMProvider MI_modifyInstance returning') |
python | def updateClusterSize(self, estimatedNodeCounts):
"""
Given the desired and current size of the cluster, attempts to launch/remove instances to
get to the desired size. Also attempts to remove ignored nodes that were marked for graceful
removal.
Returns the new size of the cluster.
"""
newNodeCounts = defaultdict(int)
for nodeShape, estimatedNodeCount in estimatedNodeCounts.items():
nodeType = self.nodeShapeToType[nodeShape]
newNodeCount = self.setNodeCount(nodeType=nodeType, numNodes=estimatedNodeCount, preemptable=nodeShape.preemptable)
# If we were scaling up a preemptable node type and failed to meet
# our target, we will attempt to compensate for the deficit while scaling
# non-preemptable nodes of this type.
if nodeShape.preemptable:
if newNodeCount < estimatedNodeCount:
deficit = estimatedNodeCount - newNodeCount
logger.debug('Preemptable scaler detected deficit of %d nodes of type %s.' % (deficit, nodeType))
self.preemptableNodeDeficit[nodeType] = deficit
else:
self.preemptableNodeDeficit[nodeType] = 0
newNodeCounts[nodeShape] = newNodeCount
#Attempt to terminate any nodes that we previously designated for
#termination, but which still had workers running.
self._terminateIgnoredNodes()
return newNodeCounts |
python | def get_prediction(self, u=0):
"""
Predicts the next state of the filter and returns it without
altering the state of the filter.
Parameters
----------
u : np.array
optional control input
Returns
-------
(x, P) : tuple
State vector and covariance array of the prediction.
"""
x = dot(self.F, self.x) + dot(self.B, u)
P = self._alpha_sq * dot(dot(self.F, self.P), self.F.T) + self.Q
return (x, P) |
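
The two lines above are the standard Kalman time update, x_pred = F·x + B·u and P_pred = alpha^2·F·P·F^T + Q, computed without mutating the filter state. As an illustration only, here is a minimal NumPy sketch of the same computation with toy matrices; the model values below are assumptions, not taken from the original filter object:

```python
import numpy as np

# Toy constant-velocity model; all values here are illustrative assumptions.
dt = 1.0
F = np.array([[1.0, dt], [0.0, 1.0]])   # state transition
B = np.array([[0.0], [dt]])             # control-input model
Q = np.eye(2) * 0.01                    # process noise covariance
x = np.array([[0.0], [1.0]])            # state: position, velocity
P = np.eye(2)                           # state covariance
alpha_sq = 1.0                          # fading-memory factor

u = np.array([[2.0]])                   # optional control input
x_pred = np.dot(F, x) + np.dot(B, u)                 # mirrors the method's first line
P_pred = alpha_sq * np.dot(np.dot(F, P), F.T) + Q    # mirrors the second line
print(x_pred.ravel(), P_pred)
```
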
java | public void setTags(java.util.Collection<Tag> tags) {
if (tags == null) {
this.tags = null;
return;
}
this.tags = new com.ibm.cloud.objectstorage.internal.SdkInternalList<Tag>(tags);
} |
python | def __setup_connection(self):
"""
Each operation requested represents a session.
The session holds information about the plugin running it
and establishes a project object.
"""
if isinstance(self.payload, dict) and 'settings' in self.payload:
config.plugin_client_settings = self.payload['settings']
config.offline = self.args.offline
config.connection = PluginConnection(
client=self.args.client or 'SUBLIME_TEXT_3',
ui=self.args.ui_switch,
args=self.args,
params=self.payload,
operation=self.operation,
verbose=self.args.verbose)
config.project = MavensMateProject(params=self.payload,ui=self.args.ui_switch)
config.sfdc_client = config.project.sfdc_client |
java | public static GrayS16 average( InterleavedS16 input , GrayS16 output ) {
if (output == null) {
output = new GrayS16(input.width, input.height);
} else {
output.reshape(input.width,input.height);
}
if( BoofConcurrency.USE_CONCURRENT ) {
ConvertInterleavedToSingle_MT.average(input,output);
} else {
ConvertInterleavedToSingle.average(input,output);
}
return output;
} |
java | public static void fillItemDefault(Item resourceItem, CmsObject cms, CmsResource resource, Locale locale) {
if (resource == null) {
LOG.error("Error rendering item for 'null' resource");
return;
}
if (resourceItem == null) {
LOG.error("Error rendering 'null' item for resource " + resource.getRootPath());
return;
}
if (cms == null) {
cms = A_CmsUI.getCmsObject();
LOG.warn("CmsObject was 'null', using thread local CmsObject");
}
CmsResourceUtil resUtil = new CmsResourceUtil(cms, resource);
Map<String, CmsProperty> resourceProps = null;
try {
List<CmsProperty> props = cms.readPropertyObjects(resource, false);
resourceProps = new HashMap<String, CmsProperty>();
for (CmsProperty prop : props) {
resourceProps.put(prop.getName(), prop);
}
} catch (CmsException e1) {
LOG.debug("Unable to read properties for resource '" + resource.getRootPath() + "'.", e1);
}
I_CmsResourceType type = OpenCms.getResourceManager().getResourceType(resource);
if (resourceItem.getItemProperty(PROPERTY_TYPE_ICON) != null) {
resourceItem.getItemProperty(PROPERTY_TYPE_ICON).setValue(
new CmsResourceIcon(resUtil, resource.getState(), true));
}
if (resourceItem.getItemProperty(PROPERTY_PROJECT) != null) {
Label projectFlag = null;
switch (resUtil.getProjectState().getMode()) {
case 1:
projectFlag = new Label(
new CmsCssIcon(OpenCmsTheme.ICON_PROJECT_CURRENT).getHtml(resUtil.getLockedInProjectName()),
ContentMode.HTML);
break;
case 2:
projectFlag = new Label(
new CmsCssIcon(OpenCmsTheme.ICON_PROJECT_OTHER).getHtml(resUtil.getLockedInProjectName()),
ContentMode.HTML);
break;
case 5:
projectFlag = new Label(
new CmsCssIcon(OpenCmsTheme.ICON_PUBLISH).getHtml(resUtil.getLockedInProjectName()),
ContentMode.HTML);
break;
default:
}
resourceItem.getItemProperty(PROPERTY_PROJECT).setValue(projectFlag);
}
if (resourceItem.getItemProperty(PROPERTY_INSIDE_PROJECT) != null) {
resourceItem.getItemProperty(PROPERTY_INSIDE_PROJECT).setValue(Boolean.valueOf(resUtil.isInsideProject()));
}
if (resourceItem.getItemProperty(PROPERTY_RELEASED_NOT_EXPIRED) != null) {
resourceItem.getItemProperty(PROPERTY_RELEASED_NOT_EXPIRED).setValue(
Boolean.valueOf(resUtil.isReleasedAndNotExpired()));
}
if (resourceItem.getItemProperty(PROPERTY_RESOURCE_NAME) != null) {
resourceItem.getItemProperty(PROPERTY_RESOURCE_NAME).setValue(resource.getName());
}
if (resourceItem.getItemProperty(PROPERTY_SITE_PATH) != null) {
resourceItem.getItemProperty(PROPERTY_SITE_PATH).setValue(cms.getSitePath(resource));
}
if ((resourceItem.getItemProperty(PROPERTY_TITLE) != null) && (resourceProps != null)) {
resourceItem.getItemProperty(PROPERTY_TITLE).setValue(
resourceProps.containsKey(CmsPropertyDefinition.PROPERTY_TITLE)
? resourceProps.get(CmsPropertyDefinition.PROPERTY_TITLE).getValue()
: "");
}
boolean inNavigation = false;
if ((resourceItem.getItemProperty(PROPERTY_NAVIGATION_TEXT) != null) && (resourceProps != null)) {
resourceItem.getItemProperty(PROPERTY_NAVIGATION_TEXT).setValue(
resourceProps.containsKey(CmsPropertyDefinition.PROPERTY_NAVTEXT)
? resourceProps.get(CmsPropertyDefinition.PROPERTY_NAVTEXT).getValue()
: "");
inNavigation = resourceProps.containsKey(CmsPropertyDefinition.PROPERTY_NAVTEXT);
}
if ((resourceItem.getItemProperty(PROPERTY_NAVIGATION_POSITION) != null) && (resourceProps != null)) {
try {
Float navPos = resourceProps.containsKey(CmsPropertyDefinition.PROPERTY_NAVPOS)
? Float.valueOf(resourceProps.get(CmsPropertyDefinition.PROPERTY_NAVPOS).getValue())
: (inNavigation ? Float.valueOf(Float.MAX_VALUE) : null);
resourceItem.getItemProperty(PROPERTY_NAVIGATION_POSITION).setValue(navPos);
inNavigation = navPos != null;
} catch (Exception e) {
LOG.debug("Error evaluating navPos property", e);
}
}
if (resourceItem.getItemProperty(PROPERTY_IN_NAVIGATION) != null) {
if (inNavigation
&& (resourceProps != null)
&& resourceProps.containsKey(CmsPropertyDefinition.PROPERTY_NAVINFO)
&& CmsClientSitemapEntry.HIDDEN_NAVIGATION_ENTRY.equals(
resourceProps.get(CmsPropertyDefinition.PROPERTY_NAVINFO).getValue())) {
inNavigation = false;
}
resourceItem.getItemProperty(PROPERTY_IN_NAVIGATION).setValue(Boolean.valueOf(inNavigation));
}
if ((resourceItem.getItemProperty(PROPERTY_COPYRIGHT) != null) && (resourceProps != null)) {
resourceItem.getItemProperty(PROPERTY_COPYRIGHT).setValue(
resourceProps.containsKey(CmsPropertyDefinition.PROPERTY_COPYRIGHT)
? resourceProps.get(CmsPropertyDefinition.PROPERTY_COPYRIGHT).getValue()
: "");
}
if ((resourceItem.getItemProperty(PROPERTY_CACHE) != null) && (resourceProps != null)) {
resourceItem.getItemProperty(PROPERTY_CACHE).setValue(
resourceProps.containsKey(CmsPropertyDefinition.PROPERTY_CACHE)
? resourceProps.get(CmsPropertyDefinition.PROPERTY_CACHE).getValue()
: "");
}
if (resourceItem.getItemProperty(PROPERTY_RESOURCE_TYPE) != null) {
resourceItem.getItemProperty(PROPERTY_RESOURCE_TYPE).setValue(
CmsWorkplaceMessages.getResourceTypeName(locale, type.getTypeName()));
}
if (resourceItem.getItemProperty(PROPERTY_IS_FOLDER) != null) {
resourceItem.getItemProperty(PROPERTY_IS_FOLDER).setValue(Boolean.valueOf(resource.isFolder()));
}
if (resourceItem.getItemProperty(PROPERTY_SIZE) != null) {
if (resource.isFile()) {
resourceItem.getItemProperty(PROPERTY_SIZE).setValue(Integer.valueOf(resource.getLength()));
}
}
if (resourceItem.getItemProperty(PROPERTY_PERMISSIONS) != null) {
resourceItem.getItemProperty(PROPERTY_PERMISSIONS).setValue(resUtil.getPermissionString());
}
if (resourceItem.getItemProperty(PROPERTY_DATE_MODIFIED) != null) {
resourceItem.getItemProperty(PROPERTY_DATE_MODIFIED).setValue(Long.valueOf(resource.getDateLastModified()));
}
if (resourceItem.getItemProperty(PROPERTY_USER_MODIFIED) != null) {
resourceItem.getItemProperty(PROPERTY_USER_MODIFIED).setValue(resUtil.getUserLastModified());
}
if (resourceItem.getItemProperty(PROPERTY_DATE_CREATED) != null) {
resourceItem.getItemProperty(PROPERTY_DATE_CREATED).setValue(Long.valueOf(resource.getDateCreated()));
}
if (resourceItem.getItemProperty(PROPERTY_USER_CREATED) != null) {
resourceItem.getItemProperty(PROPERTY_USER_CREATED).setValue(resUtil.getUserCreated());
}
if (resourceItem.getItemProperty(PROPERTY_DATE_RELEASED) != null) {
long release = resource.getDateReleased();
if (release != CmsResource.DATE_RELEASED_DEFAULT) {
resourceItem.getItemProperty(PROPERTY_DATE_RELEASED).setValue(Long.valueOf(release));
} else {
resourceItem.getItemProperty(PROPERTY_DATE_RELEASED).setValue(null);
}
}
if (resourceItem.getItemProperty(PROPERTY_DATE_EXPIRED) != null) {
long expire = resource.getDateExpired();
if (expire != CmsResource.DATE_EXPIRED_DEFAULT) {
resourceItem.getItemProperty(PROPERTY_DATE_EXPIRED).setValue(Long.valueOf(expire));
} else {
resourceItem.getItemProperty(PROPERTY_DATE_EXPIRED).setValue(null);
}
}
if (resourceItem.getItemProperty(PROPERTY_STATE_NAME) != null) {
resourceItem.getItemProperty(PROPERTY_STATE_NAME).setValue(resUtil.getStateName());
}
if (resourceItem.getItemProperty(PROPERTY_STATE) != null) {
resourceItem.getItemProperty(PROPERTY_STATE).setValue(resource.getState());
}
if (resourceItem.getItemProperty(PROPERTY_USER_LOCKED) != null) {
resourceItem.getItemProperty(PROPERTY_USER_LOCKED).setValue(resUtil.getLockedByName());
}
} |
java | public static <T extends RunListener> Optional<T> getAttachedListener(Class<T> listenerType) {
return Run.getAttachedListener(listenerType);
} |
java | @SuppressWarnings({ "unchecked", "rawtypes" })
public static <T> Collection<T> create(Class<?> collectionType) {
Collection<T> list = null;
if (collectionType.isAssignableFrom(AbstractCollection.class)) {
// Abstract collections default to ArrayList
list = new ArrayList<>();
}
// Set
else if (collectionType.isAssignableFrom(HashSet.class)) {
list = new HashSet<>();
} else if (collectionType.isAssignableFrom(LinkedHashSet.class)) {
list = new LinkedHashSet<>();
} else if (collectionType.isAssignableFrom(TreeSet.class)) {
list = new TreeSet<>();
} else if (collectionType.isAssignableFrom(EnumSet.class)) {
list = (Collection<T>) EnumSet.noneOf((Class<Enum>) ClassUtil.getTypeArgument(collectionType));
}
// List
else if (collectionType.isAssignableFrom(ArrayList.class)) {
list = new ArrayList<>();
} else if (collectionType.isAssignableFrom(LinkedList.class)) {
list = new LinkedList<>();
}
// Others: instantiate directly
else {
try {
list = (Collection<T>) ReflectUtil.newInstance(collectionType);
} catch (Exception e) {
throw new UtilException(e);
}
}
return list;
} |
java | public int count(String column, Object value) {
return count(Operators.match(column, value));
} |
java | protected static <T> Action1<Throwable> onErrorFrom(final Observer<T> observer) {
return new Action1<Throwable>() {
@Override
public void call(Throwable t1) {
observer.onError(t1);
}
};
} |
java | private void addValues(final Document doc, final PropertyData prop) throws RepositoryException
{
int propType = prop.getType();
String fieldName = resolver.createJCRName(prop.getQPath().getName()).getAsString();
if (propType == PropertyType.BINARY)
{
InternalQName propName = prop.getQPath().getName();
List<ValueData> data = null;
if (node.getQPath().getName().equals(Constants.JCR_CONTENT) && isIndexed(propName))
{
// seems nt:file found, try for nt:resource props
PropertyData pmime = node.getProperty(Constants.JCR_MIMETYPE.getAsString());
if (pmime == null && !node.containAllProperties())
{
pmime =
(PropertyData)stateProvider.getItemData(node, new QPathEntry(Constants.JCR_MIMETYPE, 0),
ItemType.PROPERTY);
}
if (pmime != null && pmime.getValues() != null && !pmime.getValues().isEmpty())
{
// ok, have a reader
// if the property was obtained from the cache it will contain values,
// otherwise read the property with its values from the data manager (DM)
PropertyData propData =
prop.getValues() != null && !prop.getValues().isEmpty() ? prop : ((PropertyData)stateProvider
.getItemData(node, new QPathEntry(Constants.JCR_DATA, 0), ItemType.PROPERTY));
// index if have jcr:mimeType sibling for this binary property only
try
{
if (propData == null || (data = propData.getValues()) == null || data.isEmpty())
{
if (LOG.isDebugEnabled())
{
LOG.debug("No value found for the property located at " + prop.getQPath().getAsString());
}
return;
}
DocumentReader dreader =
extractor.getDocumentReader(ValueDataUtil.getString(pmime.getValues().get(0)));
// check the jcr:encoding property
PropertyData encProp = node.getProperty(Constants.JCR_ENCODING.getAsString());
if (encProp == null && !node.containAllProperties())
{
encProp =
(PropertyData)stateProvider.getItemData(node, new QPathEntry(Constants.JCR_ENCODING, 0),
ItemType.PROPERTY);
}
String encoding = null;
if (encProp != null && encProp.getValues() != null && !encProp.getValues().isEmpty())
{
// encoding parameter used
encoding = ValueDataUtil.getString(encProp.getValues().get(0));
}
else
{
if (LOG.isDebugEnabled())
{
LOG.debug("No encoding found for the node located at " + node.getQPath().getAsString());
}
}
if (dreader instanceof AdvancedDocumentReader)
{
// its a tika document reader that supports getContentAsReader
for (ValueData pvd : data)
{
// tikaDocumentReader will close inputStream, so no need to close it at finally
// statement
InputStream is = null;
is = pvd.getAsStream();
Reader reader;
if (encoding != null)
{
reader = ((AdvancedDocumentReader)dreader).getContentAsReader(is, encoding);
}
else
{
reader = ((AdvancedDocumentReader)dreader).getContentAsReader(is);
}
doc.add(createFulltextField(reader));
}
}
else
{
// old-style document reader
for (ValueData pvd : data)
{
InputStream is = null;
try
{
is = pvd.getAsStream();
Reader reader;
if (encoding != null)
{
reader = new StringReader(dreader.getContentAsText(is, encoding));
}
else
{
reader = new StringReader(dreader.getContentAsText(is));
}
doc.add(createFulltextField(reader));
}
finally
{
try
{
is.close();
}
catch (Throwable e) //NOSONAR
{
if (LOG.isTraceEnabled())
{
LOG.trace("An exception occurred: " + e.getMessage());
}
}
}
}
}
if (data.size() > 1)
{
// real multi-valued
addMVPName(doc, prop.getQPath().getName());
}
}
catch (DocumentReadException e)
{
if (LOG.isDebugEnabled())
{
LOG.debug("Cannot extract the full text content of the property " + propData.getQPath().getAsString()
+ ", propery id '" + propData.getIdentifier() + "' : " + e, e);
}
else
{
LOG.warn("Cannot extract the full text content of the property " + propData.getQPath().getAsString()
+ ", propery id '" + propData.getIdentifier());
}
}
catch (HandlerNotFoundException e)
{
// no handler - no index
if (LOG.isDebugEnabled())
{
LOG.debug("Can not indexing the document by path " + propData.getQPath().getAsString()
+ ", propery id '" + propData.getIdentifier() + "' : " + e, e);
}
}
catch (IOException e)
{
// no data - no index
if (LOG.isDebugEnabled())
{
LOG.debug("An IO exception occurred while trying to extract the full text content of the property " + propData.getQPath().getAsString()
+ ", propery id '" + propData.getIdentifier() + "' : " + e, e);
}
else
{
LOG.warn("An IO exception occurred while trying to extract the full text content of the property " + propData.getQPath().getAsString()
+ ", propery id '" + propData.getIdentifier());
}
}
catch (Exception e)
{
if (LOG.isDebugEnabled())
{
LOG.debug("An exception occurred while trying to extract the full text content of the property " + propData.getQPath().getAsString()
+ ", propery id '" + propData.getIdentifier() + "' : " + e, e);
}
else
{
LOG.warn("An exception occurred while trying to extract the full text content of the property " + propData.getQPath().getAsString()
+ ", propery id '" + propData.getIdentifier());
}
}
}
else
{
if (LOG.isDebugEnabled())
{
LOG.debug("no mime type found for the node located at " + node.getQPath().getAsString());
}
}
}
}
else
{
try
{
// if the property was obtained from the cache it will contain values, otherwise
// read the property with its values from the data manager (DM)
// We access to the Item by path to avoid having to rebuild the path if needed in case
// the indexingLoadBatchingThreshold is enabled only otherwise we get it from the id like
// before
PropertyData propData =
prop.getValues() != null && !prop.getValues().isEmpty() ? prop : (PropertyData)(loadPropertyByName
? stateProvider.getItemData(node, new QPathEntry(prop.getQPath().getName(), 0), ItemType.PROPERTY)
: stateProvider.getItemData(prop.getIdentifier()));
List<ValueData> data;
if (propData == null || (data = propData.getValues()) == null || data.isEmpty())
{
if (LOG.isDebugEnabled())
{
LOG.warn("null value found at property " + prop.getQPath().getAsString());
}
return;
}
InternalQName name = prop.getQPath().getName();
for (ValueData value : data)
{
switch (propType)
{
case PropertyType.BOOLEAN :
if (isIndexed(name))
{
addBooleanValue(doc, fieldName, ValueDataUtil.getBoolean(value));
}
break;
case PropertyType.DATE :
if (isIndexed(name))
{
addCalendarValue(doc, fieldName, ValueDataUtil.getDate(value));
}
break;
case PropertyType.DOUBLE :
if (isIndexed(name))
{
addDoubleValue(doc, fieldName, ValueDataUtil.getDouble(value));
}
break;
case PropertyType.LONG :
if (isIndexed(name))
{
addLongValue(doc, fieldName, ValueDataUtil.getLong(value));
}
break;
case PropertyType.REFERENCE :
if (isIndexed(name))
{
addReferenceValue(doc, fieldName, ValueDataUtil.getString(value));
}
break;
case PropertyType.PATH :
if (isIndexed(name))
{
addPathValue(doc, fieldName,
resolver.createJCRPath(ValueDataUtil.getPath(value)).getAsString(false));
}
break;
case PropertyType.STRING :
if (isIndexed(name))
{
// never fulltext index jcr:uuid String
if (name.equals(Constants.JCR_UUID))
{
addStringValue(doc, fieldName, ValueDataUtil.getString(value), false, false, DEFAULT_BOOST,
true);
}
else
{
addStringValue(doc, fieldName, ValueDataUtil.getString(value), true,
isIncludedInNodeIndex(name), getPropertyBoost(name), useInExcerpt(name));
}
}
break;
case PropertyType.NAME :
// jcr:primaryType and jcr:mixinTypes are required for correct
// node type resolution in queries
if (isIndexed(name) || name.equals(Constants.JCR_PRIMARYTYPE)
|| name.equals(Constants.JCR_MIXINTYPES))
{
addNameValue(doc, fieldName, resolver.createJCRName(ValueDataUtil.getName(value)).getAsString());
}
break;
case ExtendedPropertyType.PERMISSION :
break;
default :
throw new IllegalArgumentException("illegal internal value type " + propType);
}
// add length
// add not planed
if (indexFormatVersion.getVersion() >= IndexFormatVersion.V3.getVersion())
{
addLength(doc, fieldName, value, propType);
}
}
if (data.size() > 1)
{
// real multi-valued
addMVPName(doc, prop.getQPath().getName());
}
}
catch (RepositoryException e)
{
LOG.error("Index of property value error. " + prop.getQPath().getAsString() + ".", e);
throw new RepositoryException("Index of property value error. " + prop.getQPath().getAsString() + ". " + e,
e);
}
}
} |
python | def match(self, models, results, relation):
"""
Match the eagerly loaded results to their parents.
:type models: list
:type results: Collection
:type relation: str
"""
dictionary = self._build_dictionary(results)
for model in models:
key = model.get_key()
if key in dictionary:
collection = self._related.new_collection(dictionary[key])
model.set_relation(relation, collection)
return models |
python | def hash(value, chars=None):
'Get N chars (default: all) of secure hash hexdigest of value.'
value = hash_func(value).hexdigest()
if chars: value = value[:chars]
return mark_safe(value) |
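
For illustration, a self-contained sketch of what this filter computes, assuming `hash_func` is a hashlib constructor such as `hashlib.sha256` and leaving out Django's `mark_safe` (which only marks the result as safe for templates):

```python
import hashlib

def hash_digest(value, chars=None):
    """Return the first `chars` characters (default: all) of the SHA-256 hexdigest."""
    digest = hashlib.sha256(value).hexdigest()
    return digest[:chars] if chars else digest

print(hash_digest(b"hello world", 12))  # b94d27b9934d
```
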
java | public void sendUpdateQuery(String queryString, SPARQLQueryBindingSet bindings, boolean includeInferred, String baseURI) throws IOException, RepositoryException, MalformedQueryException,UpdateExecutionException {
getClient().performUpdateQuery(queryString, bindings, this.tx, includeInferred, baseURI);
} |
python | def get_template_sources(self, template_name, template_dirs=None):
"""
Returns the absolute paths to "template_name", when appended to each
directory in "template_dirs". Any paths that don't lie inside one of the
template dirs are excluded from the result set, for security reasons.
"""
if not template_dirs:
template_dirs = self.get_dirs()
for template_dir in template_dirs:
try:
name = safe_join(template_dir, template_name)
except SuspiciousFileOperation:
# The joined path was located outside of this template_dir
# (it might be inside another one, so this isn't fatal).
pass
else:
if Origin:
yield Origin(
name=name,
template_name=template_name,
loader=self,
)
else:
yield name |
java | @Override
public boolean isSameState(Word<I> input1, S s1, Word<I> input2, S s2) {
return s1.equals(s2);
} |
python | def remove_handlers_bound_to_instance(self, obj):
"""
Remove all handlers bound to given object instance.
This is useful to remove all handler methods that are part of an instance.
:param object obj: Remove handlers that are methods of this instance
"""
for handler in list(self.handlers):  # iterate over a copy, since removal mutates self.handlers
if handler.im_self == obj:
self -= handler |
python | def _the_view_kwd(self, postinfo):
'''
Generate the kwd dict for view.
:param postinfo: the postinfo
:return: dict
'''
kwd = {
'pager': '',
'url': self.request.uri,
'cookie_str': tools.get_uuid(),
'daohangstr': '',
'signature': postinfo.uid,
'tdesc': '',
'eval_0': MEvaluation.app_evaluation_count(postinfo.uid, 0),
'eval_1': MEvaluation.app_evaluation_count(postinfo.uid, 1),
'login': 1 if self.get_current_user() else 0,
'has_image': 0,
'parentlist': MCategory.get_parent_list(),
'parentname': '',
'catname': '',
'router': router_post[postinfo.kind]
}
return kwd |
python | def implements(obj, protocol):
"""Does the object 'obj' implement the 'prococol'?"""
if isinstance(obj, type):
raise TypeError("First argument to implements must be an instance. "
"Got %r." % obj)
return isinstance(obj, protocol) or issubclass(AnyType, protocol) |
python | def register(reg_name):
"""Register a subclass of CustomOpProp to the registry with name reg_name."""
def do_register(prop_cls):
"""Register a subclass of CustomOpProp to the registry."""
fb_functype = CFUNCTYPE(c_int, c_int, POINTER(c_void_p), POINTER(c_int),
POINTER(c_int), c_int, c_void_p)
del_functype = CFUNCTYPE(c_int, c_void_p)
infershape_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int),
POINTER(POINTER(mx_int)), c_void_p)
infertype_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), c_void_p)
inferstorage_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), c_void_p)
inferstorage_backward_functype = CFUNCTYPE(c_int, c_int, POINTER(c_int), \
POINTER(c_int), c_void_p)
list_functype = CFUNCTYPE(c_int, POINTER(POINTER(POINTER(c_char))), c_void_p)
deps_functype = CFUNCTYPE(c_int, c_int_p, c_int_p, c_int_p,
c_int_p, POINTER(c_int_p), c_void_p)
createop_functype = CFUNCTYPE(c_int, c_char_p, c_int, POINTER(POINTER(mx_uint)),
POINTER(c_int), POINTER(c_int),
POINTER(MXCallbackList), c_void_p)
req_enum = ('null', 'write', 'inplace', 'add')
def creator(op_type, argc, keys, vals, ret):
"""internal function"""
assert py_str(op_type) == reg_name
kwargs = dict([(py_str(keys[i]), py_str(vals[i])) for i in range(argc)])
op_prop = prop_cls(**kwargs)
def infer_shape_entry(num_tensor, tensor_dims,
tensor_shapes, _):
"""C Callback for ``CustomOpProp::InferShape``."""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
shapes = [[tensor_shapes[i][j] for j in range(tensor_dims[i])]
for i in range(n_in)]
ret = op_prop.infer_shape(shapes)
if len(ret) == 2:
ishape, oshape = ret
ashape = []
elif len(ret) == 3:
ishape, oshape, ashape = ret
else:
raise AssertionError("infer_shape must return 2 or 3 lists")
assert len(oshape) == n_out, \
"InferShape Error: expecting %d entries in returned output " \
"shapes, got %d."%(n_out, len(oshape))
assert len(ishape) == n_in, \
"InferShape Error: expecting %d entries in returned input " \
"shapes, got %d."%(n_in, len(ishape))
assert len(ashape) == n_aux, \
"InferShape Error: expecting %d entries in returned aux state " \
"shapes, got %d."%(n_aux, len(ashape))
rshape = list(ishape) + list(oshape) + list(ashape)
for i in range(n_in+n_out+n_aux):
tensor_shapes[i] = cast(c_array_buf(mx_int,
array('i', rshape[i])),
POINTER(mx_int))
tensor_dims[i] = len(rshape[i])
infer_shape_entry._ref_holder = [tensor_shapes]
except Exception:
print('Error in %s.infer_shape: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_storage_type_backward_entry(num_tensor, tensor_stypes, tags, _):
# pylint: disable=C0301
"""C Callback for CustomOpProp::InferStorageTypeBackward"""
try:
tensors = [[] for i in range(5)]
for i in range(num_tensor):
tensors[tags[i]].append(_STORAGE_TYPE_ID_TO_STR[tensor_stypes[i]])
# Ordering of stypes: ograd, input, output, igrad, aux
tensors = [tensors[3], tensors[0], tensors[1], tensors[2], tensors[4]]
ret = op_prop.infer_storage_type_backward(tensors[0],
tensors[1],
tensors[2],
tensors[3],
tensors[4])
if len(ret) == 4:
ret += [[]]  # no aux stypes were returned; assume an empty aux list
elif len(ret) == 5:
pass
else:
raise AssertionError("infer_storage_type_backward must return 4 or 5 lists")
assert len(ret[0]) == len(tensors[0]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned output gradient " \
"stypes, got %d."%(len(tensors[0]), len(ret[0]))
assert len(ret[1]) == len(tensors[1]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned input stypes, " \
"got %d."%(len(tensors[1]), len(ret[1]))
assert len(ret[2]) == len(tensors[2]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned output stypes, " \
"got %d."%(len(tensors[2]), len(ret[2]))
assert len(ret[3]) == len(tensors[3]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned input gradient stypes, " \
"got %d."%(len(tensors[3]), len(ret[3]))
assert len(ret[4]) == len(tensors[4]), \
"InferStorageTypeBackward Error: expecting == %d " \
"entries in returned aux stypes, " \
"got %d."%(len(tensors[4]), len(ret[4]))
rstype = []
for i, ret_list in enumerate(ret):
rstype.extend(ret_list)
for i, stype in enumerate(rstype):
assert stype != _STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_UNDEFINED], \
"stype should not be undefined"
assert stype in _STORAGE_TYPE_STR_TO_ID, \
"Provided stype: %s is not valid " \
"valid stypes are %s, %s, %s"%(stype,
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_DEFAULT],
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_ROW_SPARSE],
_STORAGE_TYPE_ID_TO_STR[_STORAGE_TYPE_CSR])
tensor_stypes[i] = _STORAGE_TYPE_STR_TO_ID[stype]
infer_storage_type_backward_entry._ref_holder = [tensor_stypes]
except Exception:
print('Error in %s.infer_storage_type_backward: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_storage_type_entry(num_tensor, tensor_stypes, _):
"""C Callback for CustomOpProp::InferStorageType"""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
stypes = [_STORAGE_TYPE_ID_TO_STR[tensor_stypes[i]] for i in range(n_in)]
ret = op_prop.infer_storage_type(stypes)
if len(ret) == 2:
istype, ostype = ret
astype = []
elif len(ret) == 3:
istype, ostype, astype = ret
else:
raise AssertionError("infer_storage_type must return 2 or 3 lists")
assert len(ostype) == n_out, \
"InferStorageType Error: expecting %d entries in returned output " \
"stypes, got %d."%(n_out, len(ostype))
assert len(istype) == n_in, \
"InferStorageType Error: expecting %d entries in returned input " \
"stypes, got %d."%(n_in, len(istype))
assert len(astype) == n_aux, \
"InferStorageType Error: expecting %d entries in returned aux state " \
"stypes, got %d."%(n_aux, len(astype))
rtype = list(istype) + list(ostype) + list(astype)
for i, dtype in enumerate(rtype):
tensor_stypes[i] = _STORAGE_TYPE_STR_TO_ID[dtype]
infer_storage_type_entry._ref_holder = [tensor_stypes]
except Exception:
print('Error in %s.infer_storage_type: %s' % (reg_name, traceback.format_exc()))
return False
return True
def infer_type_entry(num_tensor, tensor_types, _):
"""C Callback for CustomOpProp::InferType"""
try:
n_in = len(op_prop.list_arguments())
n_out = len(op_prop.list_outputs())
n_aux = len(op_prop.list_auxiliary_states())
assert num_tensor == n_in + n_out + n_aux
types = [_DTYPE_MX_TO_NP[tensor_types[i]] for i in range(n_in)]
ret = op_prop.infer_type(types)
if len(ret) == 2:
itype, otype = ret
atype = []
elif len(ret) == 3:
itype, otype, atype = ret
else:
raise AssertionError("infer_type must return 2 or 3 lists")
assert len(otype) == n_out, \
"InferType Error: expecting %d entries in returned output " \
"types, got %d."%(n_out, len(otype))
assert len(itype) == n_in, \
"InferType Error: expecting %d entries in returned input " \
"types, got %d."%(n_in, len(itype))
assert len(atype) == n_aux, \
"InferType Error: expecting %d entries in returned aux state " \
"types, got %d."%(n_aux, len(atype))
rtype = list(itype) + list(otype) + list(atype)
for i, dtype in enumerate(rtype):
tensor_types[i] = _DTYPE_NP_TO_MX[dtype]
infer_type_entry._ref_holder = [tensor_types]
except Exception:
print('Error in %s.infer_type: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_outputs_entry(out, _):
"""C Callback for CustomOpProp::ListOutputs"""
try:
ret = op_prop.list_outputs()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_outputs_entry._ref_holder = [out]
except Exception:
print('Error in %s.list_outputs: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_arguments_entry(out, _):
"""C Callback for CustomOpProp::ListArguments"""
try:
ret = op_prop.list_arguments()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_arguments_entry._ref_holder = [out]
except Exception:
print('Error in %s.list_arguments: %s' % (reg_name, traceback.format_exc()))
return False
return True
def list_auxiliary_states_entry(out, _):
"""C Callback for CustomOpProp::ListAuxiliaryStates"""
try:
ret = op_prop.list_auxiliary_states()
ret = [c_str(i) for i in ret] + [c_char_p(0)]
ret = c_array(c_char_p, ret)
out[0] = cast(ret, POINTER(POINTER(c_char)))
list_auxiliary_states_entry._ref_holder = [out]
except Exception:
tb = traceback.format_exc()
print('Error in %s.list_auxiliary_states: %s' % (reg_name, tb))
return False
return True
def declare_backward_dependency_entry(out_grad, in_data, out_data, num_dep, deps, _):
"""C Callback for CustomOpProp::DeclareBacwardDependency"""
try:
out_grad = [out_grad[i] for i in range(len(op_prop.list_outputs()))]
in_data = [in_data[i] for i in range(len(op_prop.list_arguments()))]
out_data = [out_data[i] for i in range(len(op_prop.list_outputs()))]
rdeps = op_prop.declare_backward_dependency(out_grad, in_data, out_data)
num_dep[0] = len(rdeps)
_registry.result_deps = set()
for dep in rdeps:
_registry.result_deps.add(dep)
rdeps = cast(c_array_buf(c_int, array('i', rdeps)), c_int_p)
deps[0] = rdeps
declare_backward_dependency_entry._ref_holder = [deps]
except Exception:
tb = traceback.format_exc()
print('Error in %s.declare_backward_dependency: %s' % (reg_name, tb))
return False
return True
def create_operator_entry(ctx, num_inputs, shapes, ndims, dtypes, ret, _):
"""C Callback for CustomOpProp::CreateOperator"""
try:
ctx = py_str(ctx)
sep = ctx.find('(')
ctx = context.Context(ctx[:sep], int(ctx[sep+1:-1]))
ndims = [ndims[i] for i in range(num_inputs)]
shapes = [[shapes[i][j] for j in range(ndims[i])] for i in range(num_inputs)]
dtypes = [dtypes[i] for i in range(num_inputs)]
op = op_prop.create_operator(ctx, shapes, dtypes)
def forward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
"""C Callback for CustomOp::Forward"""
try:
tensors = [[] for i in range(5)]
for i in range(num_ndarray):
if tags[i] == 1 or tags[i] == 4:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=True))
else:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=False))
reqs = [req_enum[reqs[i]] for i in range(len(tensors[1]))]
with ctx:
op.forward(is_train=is_train, req=reqs,
in_data=tensors[0], out_data=tensors[1],
aux=tensors[4])
except Exception:
print('Error in CustomOp.forward: %s' % traceback.format_exc())
return False
return True
def backward_entry(num_ndarray, ndarraies, tags, reqs, is_train, _):
"""C Callback for CustomOp::Backward"""
# pylint: disable=W0613
try:
tensors = [[] for i in range(5)]
num_outputs = len(op_prop.list_outputs())
num_args = len(op_prop.list_arguments())
for i in range(num_ndarray):
if i in _registry.result_deps or i >= (num_outputs * 2 + num_args):
# If it is a backward dependency or output or aux:
# Set stype as undefined so that it returns
# ndarray based on existing stype
stype = _STORAGE_TYPE_UNDEFINED
else:
# If it is some input, output or out grad ndarray not part of
# backward dependency it is empty and thus the ndarray should
# be set to default
stype = _STORAGE_TYPE_DEFAULT
if tags[i] == 2 or tags[i] == 4:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=True,
stype=stype))
else:
tensors[tags[i]].append(_ndarray_cls(cast(ndarraies[i],
NDArrayHandle),
writable=False,
stype=stype))
reqs = [req_enum[reqs[i]] for i in range(len(tensors[2]))]
with ctx:
op.backward(req=reqs,
in_data=tensors[0], out_data=tensors[1],
in_grad=tensors[2], out_grad=tensors[3],
aux=tensors[4])
except Exception:
print('Error in CustomOp.backward: %s' % traceback.format_exc())
return False
return True
cur = _registry.inc()
def delete_entry(_):
"""C Callback for CustomOp::del"""
try:
del _registry.ref_holder[cur]
except Exception:
print('Error in CustomOp.delete: %s' % traceback.format_exc())
return False
return True
callbacks = [del_functype(delete_entry),
fb_functype(forward_entry),
fb_functype(backward_entry)]
callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
contexts = [None, None, None]
ret[0] = MXCallbackList(c_int(len(callbacks)),
cast(c_array(CFUNCTYPE(c_int), callbacks),
POINTER(CFUNCTYPE(c_int))),
cast(c_array(c_void_p, contexts),
POINTER(c_void_p)))
op._ref_holder = [ret]
_registry.ref_holder[cur] = op
except Exception:
print('Error in %s.create_operator: %s' % (reg_name, traceback.format_exc()))
return False
return True
cur = _registry.inc()
def delete_entry(_):
"""C Callback for CustomOpProp::del"""
try:
del _registry.ref_holder[cur]
except Exception:
print('Error in CustomOpProp.delete: %s' % traceback.format_exc())
return False
return True
callbacks = [del_functype(delete_entry),
list_functype(list_arguments_entry),
list_functype(list_outputs_entry),
list_functype(list_auxiliary_states_entry),
infershape_functype(infer_shape_entry),
deps_functype(declare_backward_dependency_entry),
createop_functype(create_operator_entry),
infertype_functype(infer_type_entry),
inferstorage_functype(infer_storage_type_entry),
inferstorage_backward_functype(infer_storage_type_backward_entry)]
callbacks = [cast(i, CFUNCTYPE(c_int)) for i in callbacks]
contexts = [None]*len(callbacks)
ret[0] = MXCallbackList(c_int(len(callbacks)),
cast(c_array(CFUNCTYPE(c_int), callbacks),
POINTER(CFUNCTYPE(c_int))),
cast(c_array(c_void_p, contexts),
POINTER(c_void_p)))
op_prop._ref_holder = [ret]
_registry.ref_holder[cur] = op_prop
return True
creator_functype = CFUNCTYPE(c_int, c_char_p, c_int, POINTER(c_char_p),
POINTER(c_char_p), POINTER(MXCallbackList))
creator_func = creator_functype(creator)
check_call(_LIB.MXCustomOpRegister(c_str(reg_name), creator_func))
cur = _registry.inc()
_registry.ref_holder[cur] = creator_func
return prop_cls
return do_register |
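
All of the ctypes plumbing above backs a much simpler public usage. Below is a hedged sketch of how an operator is typically registered with this decorator, following the MXNet custom-op pattern; the operator name and class names are illustrative, not part of the original source:

```python
import numpy as np
import mxnet as mx

class Sigmoid(mx.operator.CustomOp):
    def forward(self, is_train, req, in_data, out_data, aux):
        y = 1.0 / (1.0 + np.exp(-in_data[0].asnumpy()))
        self.assign(out_data[0], req[0], mx.nd.array(y))

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        y = out_data[0].asnumpy()
        dy = out_grad[0].asnumpy()
        self.assign(in_grad[0], req[0], mx.nd.array(dy * y * (1.0 - y)))

@mx.operator.register("my_sigmoid")   # invokes the register() shown above
class SigmoidProp(mx.operator.CustomOpProp):
    def __init__(self):
        super(SigmoidProp, self).__init__(need_top_grad=True)

    def list_arguments(self):
        return ['data']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # inputs, outputs, auxiliary states
        return [in_shape[0]], [in_shape[0]], []

    def create_operator(self, ctx, in_shapes, in_dtypes):
        return Sigmoid()

# Usage: y = mx.nd.Custom(mx.nd.array([0.0, 1.0]), op_type='my_sigmoid')
```
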
java | public Peer addPeer(String url, String pem) {
Peer peer = new Peer(url, pem, this);
this.peers.add(peer);
return peer;
} |
python | def _blank_param_value(value):
"""Remove the content from *value* while keeping its whitespace.
Replace *value*\\ 's nodes with two text nodes, the first containing
whitespace from before its content and the second containing whitespace
from after its content.
"""
sval = str(value)
if sval.isspace():
before, after = "", sval
else:
match = re.search(r"^(\s*).*?(\s*)$", sval, FLAGS)
before, after = match.group(1), match.group(2)
value.nodes = [Text(before), Text(after)] |
java | public void marshall(GetJobRequest getJobRequest, ProtocolMarshaller protocolMarshaller) {
if (getJobRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(getJobRequest.getId(), ID_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def _set_load_balance(self, v, load=False):
"""
Setter method for load_balance, mapped from YANG variable /interface/port_channel/load_balance (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_load_balance is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_load_balance() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'src-dst-ip-port': {'value': 6}, u'src-mac-vid': {'value': 2}, u'src-dst-ip': {'value': 4}, u'src-dst-ip-mac-vid': {'value': 5}, u'dst-mac-vid': {'value': 1}, u'src-dst-mac-vid': {'value': 3}, u'src-dst-ip-mac-vid-port': {'value': 7}},), default=unicode("src-dst-ip-mac-vid-port"), is_leaf=True, yang_name="load-balance", rest_name="load-balance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Load balancing Commands'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """load_balance must be of a type compatible with enumeration""",
'defined-type': "brocade-interface:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'src-dst-ip-port': {'value': 6}, u'src-mac-vid': {'value': 2}, u'src-dst-ip': {'value': 4}, u'src-dst-ip-mac-vid': {'value': 5}, u'dst-mac-vid': {'value': 1}, u'src-dst-mac-vid': {'value': 3}, u'src-dst-ip-mac-vid-port': {'value': 7}},), default=unicode("src-dst-ip-mac-vid-port"), is_leaf=True, yang_name="load-balance", rest_name="load-balance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Load balancing Commands'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='enumeration', is_config=True)""",
})
self.__load_balance = t
if hasattr(self, '_set'):
self._set() |
python | async def fetchmany(self, size=None):
"""Fetch many rows, just like DB-API
cursor.fetchmany(size=cursor.arraysize).
If rows are present, the cursor remains open after this is called.
Else the cursor is automatically closed and an empty list is returned.
"""
try:
if size is None:
rows = await self._cursor.fetchmany()
else:
rows = await self._cursor.fetchmany(size)
except AttributeError:
self._non_result()
else:
ret = self._process_rows(rows)
if len(ret) == 0:
await self.close()
return ret |
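
A usage sketch only; the engine, table, and column names below are hypothetical, assuming an aiopg.sa-style result object whose `fetchmany` behaves as above (empty list once exhausted, cursor closed automatically):

```python
async def dump_in_batches(engine, batch_size=100):
    # `engine` and the `users` table are illustrative assumptions.
    async with engine.acquire() as conn:
        result = await conn.execute("SELECT id, name FROM users")
        while True:
            rows = await result.fetchmany(batch_size)
            if not rows:   # exhausted: fetchmany returned [] and closed the cursor
                break
            for row in rows:
                print(row.id, row.name)
```
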
python | def pointspace(self, **kwargs):
"""
Returns a dictionary with the keys `data` and `fit`.
`data` is just `scipy_data_fitting.Data.array`.
`fit` is a two row [`numpy.ndarray`][1], the first row values correspond
to the independent variable and are generated using [`numpy.linspace`][2].
The second row are the values of `scipy_data_fitting.Fit.fitted_function`
evaluated on the linspace.
For both `fit` and `data`, each row will be scaled by the corresponding
inverse prefix if given in `scipy_data_fitting.Fit.independent`
or `scipy_data_fitting.Fit.dependent`.
Any keyword arguments are passed to [`numpy.linspace`][2].
[1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
[2]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html
"""
scale_array = numpy.array([
[prefix_factor(self.independent)**(-1)],
[prefix_factor(self.dependent)**(-1)]
])
linspace = numpy.linspace(self.limits[0], self.limits[1], **kwargs)
return {
'data': self.data.array * scale_array,
'fit': numpy.array([linspace, self.fitted_function(linspace)]) * scale_array
} |
python | def set_mphone_calibration(self, sens, db):
"""Sets the microphone calibration, for the purpose of calculating recorded dB levels
:param sens: microphone sensitivity (V)
:type sens: float
:param db: dB SPL that the calibration was measured at
:type db: int
"""
self.bs_calibrator.set_mphone_calibration(sens, db)
self.tone_calibrator.set_mphone_calibration(sens, db) |
python | def prefix_iter(self, ns_uri):
"""Gets an iterator over the prefixes for the given namespace."""
ni = self.__lookup_uri(ns_uri)
return iter(ni.prefixes) |
python | def get_all_responses(self, service_name, receive_timeout_in_seconds=None):
"""
Receive all available responses from the service as a generator.
:param service_name: The name of the service from which to receive responses
:type service_name: union[str, unicode]
:param receive_timeout_in_seconds: How long to block without receiving a message before raising
`MessageReceiveTimeout` (defaults to five seconds unless the settings
specify otherwise).
:type receive_timeout_in_seconds: int
:return: A generator that yields (request ID, job response)
:rtype: generator
:raise: ConnectionError, MessageReceiveError, MessageReceiveTimeout, InvalidMessage, StopIteration
"""
handler = self._get_handler(service_name)
return handler.get_all_responses(receive_timeout_in_seconds) |
python | def _create_update_from_file(mode='create', uuid=None, path=None):
'''
Create or update a VM from a file.
'''
ret = {}
if path is None or not os.path.isfile(path):
ret['Error'] = 'File ({0}) does not exist!'.format(path)
return ret
# vmadm validate create|update [-f <filename>]
cmd = 'vmadm validate {mode} {brand} -f {path}'.format(
mode=mode,
brand=get(uuid)['brand'] if uuid is not None else '',
path=path
)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = _exit_status(retcode)
if 'stderr' in res:
if res['stderr'].startswith('{'):
ret['Error'] = salt.utils.json.loads(res['stderr'])
else:
ret['Error'] = res['stderr']
return ret
# vmadm create|update [-f <filename>]
cmd = 'vmadm {mode} {uuid} -f {path}'.format(
mode=mode,
uuid=uuid if uuid is not None else '',
path=path
)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
if retcode != 0:
ret['Error'] = _exit_status(retcode)
if 'stderr' in res:
if res['stderr'].startswith('{'):
ret['Error'] = salt.utils.json.loads(res['stderr'])
else:
ret['Error'] = res['stderr']
return ret
else:
if res['stderr'].startswith('Successfully created VM'):
return res['stderr'][24:]
return True |
java | public static int distance(String s1, String s2) {
if (s1.length() == 0)
return s2.length();
if (s2.length() == 0)
return s1.length();
int s1len = s1.length();
// we use a flat array for better performance. we address it by
// s1ix + s1len * s2ix. this modification improves performance
// by about 30%, which is definitely worth the extra complexity.
int[] matrix = new int[(s1len + 1) * (s2.length() + 1)];
for (int col = 0; col <= s2.length(); col++)
matrix[col * s1len] = col;
for (int row = 0; row <= s1len; row++)
matrix[row] = row;
for (int ix1 = 0; ix1 < s1len; ix1++) {
char ch1 = s1.charAt(ix1);
for (int ix2 = 0; ix2 < s2.length(); ix2++) {
int cost;
if (ch1 == s2.charAt(ix2))
cost = 0;
else
cost = 1;
int left = matrix[ix1 + ((ix2 + 1) * s1len)] + 1;
int above = matrix[ix1 + 1 + (ix2 * s1len)] + 1;
int aboveleft = matrix[ix1 + (ix2 * s1len)] + cost;
matrix[ix1 + 1 + ((ix2 + 1) * s1len)] =
Math.min(left, Math.min(above, aboveleft));
}
}
// for (int ix1 = 0; ix1 <= s1len; ix1++) {
// for (int ix2 = 0; ix2 <= s2.length(); ix2++) {
// System.out.print(matrix[ix1 + (ix2 * s1len)] + " ");
// }
// System.out.println();
// }
return matrix[s1len + (s2.length() * s1len)];
} |
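
To make the flat-array indexing easier to follow, here is the same Levenshtein recurrence re-expressed with an explicit 2-D table; this is an explanatory sketch, not part of the original source:

```python
def levenshtein(s1: str, s2: str) -> int:
    # dp[i][j] = edit distance between s1[:i] and s2[:j]
    dp = [[0] * (len(s2) + 1) for _ in range(len(s1) + 1)]
    for i in range(len(s1) + 1):
        dp[i][0] = i
    for j in range(len(s2) + 1):
        dp[0][j] = j
    for i in range(1, len(s1) + 1):
        for j in range(1, len(s2) + 1):
            cost = 0 if s1[i - 1] == s2[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,         # deletion
                           dp[i][j - 1] + 1,         # insertion
                           dp[i - 1][j - 1] + cost)  # substitution
    return dp[len(s1)][len(s2)]

assert levenshtein("kitten", "sitting") == 3
```
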
java | public java.util.List<MetricDataResult> getMetricDataResults() {
if (metricDataResults == null) {
metricDataResults = new com.amazonaws.internal.SdkInternalList<MetricDataResult>();
}
return metricDataResults;
} |
python | def _api_help(self):
"""Glances API RESTful implementation.
Return the help data or 404 error.
"""
response.content_type = 'application/json; charset=utf-8'
# Update the stat
view_data = self.stats.get_plugin("help").get_view_data()
try:
plist = json.dumps(view_data, sort_keys=True)
except Exception as e:
abort(404, "Cannot get help view data (%s)" % str(e))
return plist |
java | @Override
public double getValue(int idx) {
int vSize = MVecArray.count(varBeliefs);
if (idx < vSize) {
return MVecArray.getValue(idx, varBeliefs);
} else {
return MVecArray.getValue(idx - vSize, facBeliefs);
}
} |
python | def load_module(self, name):
"""
If we get this far, then there are hooks waiting to be called on
import of this module. We manually load the module and then run the
hooks.
@param name: The name of the module to import.
"""
self.loaded_modules.append(name)
try:
__import__(name, {}, {}, [])
mod = sys.modules[name]
self._run_hooks(name, mod)
except:
self.loaded_modules.pop()
raise
return mod |
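
The method above targets the legacy PEP 302 loader protocol so that registered hooks run once the real module has been imported. Purely as a point of comparison (not how the original works), a minimal modern sketch of the same "observe imports" idea using importlib and sys.meta_path:

```python
import importlib.abc
import sys

class ImportLogger(importlib.abc.MetaPathFinder):
    """Observe every import, then defer to the regular finders for the actual load."""
    def find_spec(self, fullname, path, target=None):
        print("importing:", fullname)
        return None  # returning None lets the normal import machinery continue

sys.meta_path.insert(0, ImportLogger())

import csv  # prints "importing: csv" the first time csv is imported
```
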
python | def get_qout(self,
river_id_array=None,
date_search_start=None,
date_search_end=None,
time_index_start=None,
time_index_end=None,
time_index=None,
time_index_array=None,
daily=False,
pd_filter=None,
filter_mode="mean",
as_dataframe=False):
"""
This method extracts streamflow data by a single river ID
or by a river ID array. It has options to extract by date
or by date index.
Parameters
----------
river_id_array: :obj:`numpy.array` or list or int, optional
A single river ID or an array of river IDs.
date_search_start: :obj:`datetime.datetime`, optional
This is a datetime object with the date of the minimum date
for starting.
date_search_end: :obj:`datetime.datetime`, optional
This is a datetime object with the date of the maximum date
for ending.
time_index_start: int, optional
This is the index of the start of the time array subset.
Useful for the old file version.
time_index_end: int, optional
This is the index of the end of the time array subset.
Useful for the old file version.
time_index: int, optional
This is the index of time to return in the case that your
code only wants one index. Used internally.
time_index_array: list or :obj:`numpy.array`, optional
This is used to extract the values only for particular dates.
This can be from the *get_time_index_range* function.
daily: bool, optional
If true, this will convert qout to daily average.
pd_filter: str, optional
This is a valid pandas resample frequency filter.
filter_mode: str, optional
You can get the daily average "mean" or the maximum "max".
Default is "mean".
as_dataframe: bool, optional
Return as a pandas dataframe object. Default is False.
Returns
-------
qout_array: :obj:`numpy.array`
This is a 1D or 2D array or a single value depending on your
input search.
This example demonstrates how to retrieve the streamflow associated
with the reach you are interested in::
from RAPIDpy import RAPIDDataset
path_to_rapid_qout = '/path/to/Qout.nc'
river_id = 500
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
streamflow_array = qout_nc.get_qout(river_id)
This example demonstrates how to retrieve the streamflow within a date
range associated with the reach you are interested in::
from RAPIDpy import RAPIDDataset
path_to_rapid_qout = '/path/to/Qout.nc'
river_id = 500
with RAPIDDataset(path_to_rapid_qout) as qout_nc:
streamflow_array = qout_nc.get_qout(
river_id,
date_search_start=datetime(1985,1,1),
date_search_end=datetime(1985,2,4))
"""
# get indices of where the streamflow data is
riverid_index_list_subset = None
if river_id_array is not None:
if not hasattr(river_id_array, "__len__"):
river_id_array = [river_id_array]
riverid_index_list_subset = \
self.get_subset_riverid_index_list(river_id_array)[0]
return self.get_qout_index(riverid_index_list_subset,
date_search_start,
date_search_end,
time_index_start,
time_index_end,
time_index,
time_index_array,
daily,
pd_filter,
filter_mode,
as_dataframe) |
python | def password_attributes_max_retry(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
max_retry = ET.SubElement(password_attributes, "max-retry")
max_retry.text = kwargs.pop('max_retry')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
python | def _windows_cpudata():
'''
Return some CPU information on Windows minions
'''
# Provides:
# num_cpus
# cpu_model
grains = {}
if 'NUMBER_OF_PROCESSORS' in os.environ:
# Cast to int so that the logic isn't broken when used as a
# conditional in templating. Also follows _linux_cpudata()
try:
grains['num_cpus'] = int(os.environ['NUMBER_OF_PROCESSORS'])
except ValueError:
grains['num_cpus'] = 1
grains['cpu_model'] = salt.utils.win_reg.read_value(
hive="HKEY_LOCAL_MACHINE",
key="HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
vname="ProcessorNameString").get('vdata')
return grains |
python | def subparsers(self):
"""
Instantiates the subparsers for all commands
"""
if self._subparsers is None:
apkw = {
'title': 'commands',
'description': 'Commands for the %s program' % self.parser.prog,
}
self._subparsers = self.parser.add_subparsers(**apkw)
return self._subparsers |
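
The lazily created subparsers object above follows the standard argparse pattern. A self-contained sketch of how such a collection is typically populated and used; the command and argument names are made up for illustration:

```python
import argparse

parser = argparse.ArgumentParser(prog='demo')
subparsers = parser.add_subparsers(
    title='commands',
    description='Commands for the %s program' % parser.prog,
    dest='command',
)

greet = subparsers.add_parser('greet', help='print a greeting')
greet.add_argument('name')

args = parser.parse_args(['greet', 'world'])
print(args.command, args.name)  # -> greet world
```
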
java | public static void validateHostnameOrIpAddress(X509Certificate certificate, String hostname) throws UaException {
boolean dnsNameMatches =
validateSubjectAltNameField(certificate, SUBJECT_ALT_NAME_DNS_NAME, hostname::equals);
boolean ipAddressMatches =
validateSubjectAltNameField(certificate, SUBJECT_ALT_NAME_IP_ADDRESS, hostname::equals);
if (!(dnsNameMatches || ipAddressMatches)) {
throw new UaException(StatusCodes.Bad_CertificateHostNameInvalid);
}
} |
java | public void marshall(CurrentMetricResult currentMetricResult, ProtocolMarshaller protocolMarshaller) {
if (currentMetricResult == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(currentMetricResult.getDimensions(), DIMENSIONS_BINDING);
protocolMarshaller.marshall(currentMetricResult.getCollections(), COLLECTIONS_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | private boolean isRecordAlreadyExistsException(SQLException e)
{
// Search in UPPER case
// MySQL 5.0.x - com.mysql.jdbc.exceptions.MySQLIntegrityConstraintViolationException:
// Duplicate entry '4f684b34c0a800030018c34f99165791-0' for key 1
// HSQLDB 8.x - java.sql.SQLException: Violation of unique constraint $$: duplicate value(s) for
// column(s) $$:
// JCR_VCAS_PK in statement [INSERT INTO JCR_VCAS (PROPERTY_ID, ORDER_NUM, CAS_ID)
// VALUES(?,?,?)] String H2_PK_CONSTRAINT_DETECT_PATTERN = "(.*JdbcSQLException.*violation.*PRIMARY_KEY_.*)";
// PostgreSQL 8.2.x - org.postgresql.util.PSQLException: ERROR: duplicate key violates unique
// constraint "jcr_vcas_pk"
// Oracle 9i x64 (on Fedora 7) - java.sql.SQLException: ORA-00001: unique constraint
// (EXOADMIN.JCR_VCAS_PK) violated
// H2 - org.h2.jdbc.JdbcSQLException: Unique index or primary key violation:
// "PRIMARY_KEY_4 ON PUBLIC.JCR_VCAS_TEST(PROPERTY_ID, ORDER_NUM)";
//
String err = e.toString();
if (dialect.startsWith(DBConstants.DB_DIALECT_MYSQL))
{
// for MySQL will search
return MYSQL_PK_CONSTRAINT_DETECT.matcher(err).find();
}
else if (err.toLowerCase().toUpperCase().indexOf(sqlConstraintPK.toLowerCase().toUpperCase()) >= 0)
{
// most of supported dbs prints PK name in exception
return true;
}
else if (dialect.startsWith(DBConstants.DB_DIALECT_DB2))
{
return DB2_PK_CONSTRAINT_DETECT.matcher(err).find();
}
else if (dialect.startsWith(DBConstants.DB_DIALECT_H2))
{
return H2_PK_CONSTRAINT_DETECT.matcher(err).find();
}
// NOTICE! As an additional check we may ask the database for property currently processed in
// VCAS
// and tell true if the property already exists only.
return false;
} |
python | def standardize_strings(arg, strtype=settings.MODERNRPC_PY2_STR_TYPE, encoding=settings.MODERNRPC_PY2_STR_ENCODING):
"""
Python 2 only. Lookup given *arg* and convert its str or unicode value according to MODERNRPC_PY2_STR_TYPE and
MODERNRPC_PY2_STR_ENCODING settings.
"""
assert six.PY2, "This function should be used with Python 2 only"
if not strtype:
return arg
if strtype == six.binary_type or strtype == 'str':
# We want to convert from unicode to str
return _generic_convert_string(arg, six.text_type, six.binary_type, encoding)
elif strtype == six.text_type or strtype == 'unicode':
# We want to convert from str to unicode
return _generic_convert_string(arg, six.binary_type, six.text_type, encoding)
raise TypeError('standardize_strings() called with an invalid strtype: "{}". Allowed values: str or unicode'
.format(repr(strtype))) |
python | def getidfkeyswithnodes():
"""return a list of keys of idfobjects that hve 'None Name' fields"""
idf = IDF(StringIO(""))
keys = idfobjectkeys(idf)
keysfieldnames = ((key, idf.newidfobject(key.upper()).fieldnames)
for key in keys)
keysnodefdnames = ((key, (name for name in fdnames
if (name.endswith('Node_Name'))))
for key, fdnames in keysfieldnames)
nodekeys = [key for key, fdnames in keysnodefdnames if list(fdnames)]
return nodekeys |
python | def target_temperature(self, temperature):
"""Set new target temperature."""
dev_temp = int(temperature * 2)
if temperature == EQ3BT_OFF_TEMP or temperature == EQ3BT_ON_TEMP:
dev_temp |= 0x40
value = struct.pack('BB', PROP_MODE_WRITE, dev_temp)
else:
self._verify_temperature(temperature)
value = struct.pack('BB', PROP_TEMPERATURE_WRITE, dev_temp)
self._conn.make_request(PROP_WRITE_HANDLE, value) |
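
The write above packs the set-point at half-degree resolution (dev_temp = int(temperature * 2)), with bit 0x40 set only for the special on/off temperatures. Below is a small sketch of just that packing; the opcode constant is a placeholder, not the real eq3bt protocol value:

```python
import struct

PROP_TEMPERATURE_WRITE = 0x41  # placeholder opcode, not the actual eq3bt constant

def encode_target_temperature(temperature: float) -> bytes:
    dev_temp = int(temperature * 2)  # half-degree resolution
    return struct.pack('BB', PROP_TEMPERATURE_WRITE, dev_temp)

print(encode_target_temperature(21.5).hex())  # '412b' with the placeholder opcode (43 == 0x2b)
```
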
python | def destripe_plus(inputfile, suffix='strp', stat='pmode1', maxiter=15,
sigrej=2.0, lower=None, upper=None, binwidth=0.3,
scimask1=None, scimask2=None,
dqbits=None, rpt_clean=0, atol=0.01,
cte_correct=True, clobber=False, verbose=True):
r"""Calibrate post-SM4 ACS/WFC exposure(s) and use
standalone :ref:`acsdestripe`.
This takes a RAW image and generates a FLT file containing
its calibrated and destriped counterpart.
If CTE correction is performed, FLC will also be present.
Parameters
----------
inputfile : str or list of str
Input filenames in one of these formats:
* a Python list of filenames
* a partial filename with wildcards ('\*raw.fits')
* filename of an ASN table ('j12345670_asn.fits')
* an at-file (``@input``)
suffix : str
The string to use to add to each input file name to
indicate an output product of ``acs_destripe``.
This only affects the intermediate output file that will
be automatically renamed to ``*blv_tmp.fits`` during the processing.
stat : { 'pmode1', 'pmode2', 'mean', 'mode', 'median', 'midpt' } (Default = 'pmode1')
Specifies the statistics to be used for computation of the
background in image rows:
* 'pmode1' - SEXTRACTOR-like mode estimate based on a
modified `Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``2.5*median-1.5*mean``;
* 'pmode2' - mode estimate based on
`Pearson's rule <http://en.wikipedia.org/wiki/Nonparametric_skew#Pearson.27s_rule>`_:
``3*median-2*mean``;
* 'mean' - the mean of the distribution of the "good" pixels (after
clipping, masking, etc.);
* 'mode' - the mode of the distribution of the "good" pixels;
* 'median' - the median of the distribution of the "good" pixels;
* 'midpt' - estimate of the median of the distribution of the "good"
pixels based on an algorithm similar to IRAF's `imagestats` task
(``CDF(midpt)=1/2``).
.. note::
The midpoint and mode are computed in two passes through the
image. In the first pass the standard deviation of the pixels
is calculated and used with the *binwidth* parameter to compute
the resolution of the data histogram. The midpoint is estimated
by integrating the histogram and computing by interpolation
the data value at which exactly half the pixels are below that
data value and half are above it. The mode is computed by
locating the maximum of the data histogram and fitting the peak
by parabolic interpolation.
maxiter : int
This parameter controls the maximum number of iterations
to perform when computing the statistics used to compute the
row-by-row corrections.
sigrej : float
This parameters sets the sigma level for the rejection applied
during each iteration of statistics computations for the
row-by-row corrections.
lower : float, None (Default = None)
Lower limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
upper : float, None (Default = None)
Upper limit of usable pixel values for computing the background.
This value should be specified in the units of the input image(s).
    binwidth : float (Default = 0.3)
        Histogram's bin width, in sigma units, used to sample the
        distribution of pixel brightness values in order to compute the
        background statistics. This parameter is applicable *only* to *stat*
        parameter values of `'mode'` or `'midpt'`.
clobber : bool
Specify whether or not to 'clobber' (delete then replace)
previously generated products with the same names.
scimask1 : str or list of str
Mask images for *calibrated* ``SCI,1``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
scimask2 : str or list of str
Mask images for *calibrated* ``SCI,2``, one for each input file.
Pixels with zero values will be masked out, in addition to clipping.
This is not used for subarrays.
dqbits : int, str, None (Default = None)
Integer sum of all the DQ bit values from the input image's DQ array
that should be considered "good" when building masks for de-striping
computations. For example, if pixels in the DQ array can be
combinations of 1, 2, 4, and 8 flags and one wants to consider
DQ "defects" having flags 2 and 4 as being acceptable for de-striping
computations, then `dqbits` should be set to 2+4=6. Then a DQ pixel
having values 2,4, or 6 will be considered a good pixel, while a DQ
pixel with a value, e.g., 1+2=3, 4+8=12, etc. will be flagged
as a "bad" pixel.
Alternatively, one can enter a comma- or '+'-separated list of
integer bit flags that should be added to obtain the final
"good" bits. For example, both ``4,8`` and ``4+8`` are equivalent to
setting `dqbits` to 12.
| Set `dqbits` to 0 to make *all* non-zero pixels in the DQ
mask to be considered "bad" pixels, and the corresponding image
pixels not to be used for de-striping computations.
| Default value (`None`) will turn off the use of image's DQ array
for de-striping computations.
| In order to reverse the meaning of the `dqbits`
parameter from indicating values of the "good" DQ flags
to indicating the "bad" DQ flags, prepend '~' to the string
value. For example, in order not to use pixels with
DQ flags 4 and 8 for sky computations and to consider
as "good" all other pixels (regardless of their DQ flag),
set `dqbits` to ``~4+8``, or ``~4,8``. To obtain the
same effect with an `int` input value (except for 0),
enter -(4+8+1)=-9. Following this convention,
a `dqbits` string value of ``'~0'`` would be equivalent to
setting ``dqbits=None``.
.. note::
DQ masks (if used), *will be* combined with user masks specified
in the `scimask1` and `scimask2` parameters (if any).
rpt_clean : int
An integer indicating how many *additional* times stripe cleaning
should be performed on the input image. Default = 0.
atol : float, None
The threshold for maximum absolute value of bias stripe correction
below which repeated cleanings can stop. When `atol` is `None`
cleaning will be repeated `rpt_clean` number of times.
Default = 0.01 [e].
cte_correct : bool
Perform CTE correction.
verbose : bool
Print informational messages. Default = True.
Raises
------
ImportError
``stsci.tools`` not found.
IOError
Input file does not exist.
ValueError
Invalid header values or CALACS version.
"""
# Optional package dependencies
from stsci.tools import parseinput
try:
from stsci.tools.bitmask import interpret_bit_flags
except ImportError:
from stsci.tools.bitmask import (
interpret_bits_value as interpret_bit_flags
)
# process input file(s) and if we have multiple input files - recursively
# call acs_destripe_plus for each input image:
flist = parseinput.parseinput(inputfile)[0]
if isinstance(scimask1, str):
mlist1 = parseinput.parseinput(scimask1)[0]
elif isinstance(scimask1, np.ndarray):
mlist1 = [scimask1.copy()]
elif scimask1 is None:
mlist1 = []
elif isinstance(scimask1, list):
mlist1 = []
for m in scimask1:
if isinstance(m, np.ndarray):
mlist1.append(m.copy())
elif isinstance(m, str):
mlist1 += parseinput.parseinput(m)[0]
else:
raise TypeError("'scimask1' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'scimask1' must be either a str, or a "
"numpy.ndarray, or a list of the two type of "
"values.")
if isinstance(scimask2, str):
mlist2 = parseinput.parseinput(scimask2)[0]
elif isinstance(scimask2, np.ndarray):
mlist2 = [scimask2.copy()]
elif scimask2 is None:
mlist2 = []
elif isinstance(scimask2, list):
mlist2 = []
for m in scimask2:
if isinstance(m, np.ndarray):
mlist2.append(m.copy())
elif isinstance(m, str):
mlist2 += parseinput.parseinput(m)[0]
else:
raise TypeError("'scimask2' must be a list of str or "
"numpy.ndarray values.")
else:
raise TypeError("'scimask2' must be either a str, or a "
"numpy.ndarray, or a list of the two type of "
"values.")
n_input = len(flist)
n_mask1 = len(mlist1)
n_mask2 = len(mlist2)
if n_input == 0:
raise ValueError(
'No input file(s) provided or the file(s) do not exist')
if n_mask1 == 0:
mlist1 = [None] * n_input
elif n_mask1 != n_input:
raise ValueError('Insufficient masks for [SCI,1]')
if n_mask2 == 0:
mlist2 = [None] * n_input
elif n_mask2 != n_input:
raise ValueError('Insufficient masks for [SCI,2]')
if n_input > 1:
for img, mf1, mf2 in zip(flist, mlist1, mlist2):
destripe_plus(
inputfile=img, suffix=suffix, stat=stat,
lower=lower, upper=upper, binwidth=binwidth,
maxiter=maxiter, sigrej=sigrej,
                scimask1=mf1, scimask2=mf2, dqbits=dqbits,
                rpt_clean=rpt_clean, atol=atol,
                cte_correct=cte_correct, clobber=clobber, verbose=verbose
)
return
inputfile = flist[0]
scimask1 = mlist1[0]
scimask2 = mlist2[0]
# verify that the RAW image exists in cwd
cwddir = os.getcwd()
if not os.path.exists(os.path.join(cwddir, inputfile)):
raise IOError("{0} does not exist.".format(inputfile))
# get image's primary header:
header = fits.getheader(inputfile)
# verify masks defined (or not) simultaneously:
if (header['CCDAMP'] == 'ABCD' and
((scimask1 is not None and scimask2 is None) or
(scimask1 is None and scimask2 is not None))):
raise ValueError("Both 'scimask1' and 'scimask2' must be specified "
"or not specified together.")
calacs_str = subprocess.check_output(['calacs.e', '--version']).split()[0]
calacs_ver = [int(x) for x in calacs_str.decode().split('.')]
if calacs_ver < [8, 3, 1]:
        raise ValueError('CALACS {0} is incompatible. '
                         'Must be 8.3.1 or later.'.format(calacs_str))
# check date for post-SM4 and if supported subarray or full frame
is_subarray = False
ctecorr = header['PCTECORR']
aperture = header['APERTURE']
detector = header['DETECTOR']
date_obs = Time(header['DATE-OBS'])
# intermediate filenames
blvtmp_name = inputfile.replace('raw', 'blv_tmp')
blctmp_name = inputfile.replace('raw', 'blc_tmp')
# output filenames
tra_name = inputfile.replace('_raw.fits', '.tra')
flt_name = inputfile.replace('raw', 'flt')
flc_name = inputfile.replace('raw', 'flc')
if detector != 'WFC':
raise ValueError("{0} is not a WFC image, please check the 'DETECTOR'"
" keyword.".format(inputfile))
if date_obs < SM4_DATE:
raise ValueError(
"{0} is a pre-SM4 image.".format(inputfile))
if header['SUBARRAY'] and cte_correct:
if aperture in SUBARRAY_LIST:
is_subarray = True
else:
LOG.warning('Using non-supported subarray, '
'turning CTE correction off')
cte_correct = False
# delete files from previous CALACS runs
if clobber:
for tmpfilename in [blvtmp_name, blctmp_name, flt_name, flc_name,
tra_name]:
if os.path.exists(tmpfilename):
os.remove(tmpfilename)
# run ACSCCD on RAW
acsccd.acsccd(inputfile)
# modify user mask with DQ masks if requested
dqbits = interpret_bit_flags(dqbits)
if dqbits is not None:
# save 'tra' file in memory to trick the log file
# not to save first acs2d log as this is done only
# for the purpose of obtaining DQ masks.
        # WISH: it would have been nice if there were an easy way of obtaining
# just the DQ masks as if data were calibrated but without
# having to recalibrate them with acs2d.
if os.path.isfile(tra_name):
with open(tra_name) as fh:
tra_lines = fh.readlines()
else:
tra_lines = None
# apply flats, etc.
acs2d.acs2d(blvtmp_name, verbose=False, quiet=True)
# extract DQ arrays from the FLT image:
dq1, dq2 = _read_DQ_arrays(flt_name)
mask1 = _get_mask(scimask1, 1)
scimask1 = acs_destripe._mergeUserMaskAndDQ(dq1, mask1, dqbits)
mask2 = _get_mask(scimask2, 2)
if dq2 is not None:
scimask2 = acs_destripe._mergeUserMaskAndDQ(dq2, mask2, dqbits)
elif mask2 is None:
scimask2 = None
# reconstruct trailer file:
if tra_lines is not None:
with open(tra_name, mode='w') as fh:
fh.writelines(tra_lines)
# delete temporary FLT image:
if os.path.isfile(flt_name):
os.remove(flt_name)
# execute destriping (post-SM4 data only)
acs_destripe.clean(
blvtmp_name, suffix, stat=stat, maxiter=maxiter, sigrej=sigrej,
lower=lower, upper=upper, binwidth=binwidth,
mask1=scimask1, mask2=scimask2, dqbits=dqbits,
rpt_clean=rpt_clean, atol=atol, clobber=clobber, verbose=verbose)
blvtmpsfx = 'blv_tmp_{0}'.format(suffix)
os.rename(inputfile.replace('raw', blvtmpsfx), blvtmp_name)
# update subarray header
if is_subarray and cte_correct:
fits.setval(blvtmp_name, 'PCTECORR', value='PERFORM')
ctecorr = 'PERFORM'
# perform CTE correction on destriped image
if cte_correct:
if ctecorr == 'PERFORM':
acscte.acscte(blvtmp_name)
else:
LOG.warning(
"PCTECORR={0}, cannot run CTE correction".format(ctecorr))
cte_correct = False
# run ACS2D to get FLT and FLC images
acs2d.acs2d(blvtmp_name)
if cte_correct:
acs2d.acs2d(blctmp_name)
# delete intermediate files
os.remove(blvtmp_name)
if cte_correct and os.path.isfile(blctmp_name):
os.remove(blctmp_name)
info_str = 'Done.\nFLT: {0}\n'.format(flt_name)
if cte_correct:
info_str += 'FLC: {0}\n'.format(flc_name)
LOG.info(info_str) |
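The `dqbits` syntax described in the docstring ('4,8', '4+8', '~4+8', ...) is handled by stsci.tools.bitmask.interpret_bit_flags; a rough, simplified sketch of that interpretation (not the real implementation) is shown below:

def interpret_flags_sketch(spec):
    """Simplified illustration of bit-flag parsing; the real function differs in detail."""
    if spec is None or isinstance(spec, int):
        return spec
    spec = spec.strip()
    invert = spec.startswith('~')
    if invert:
        spec = spec[1:]
    total = 0
    for part in spec.replace('+', ',').split(','):
        if part.strip():
            total |= int(part)
    return ~total if invert else total

print(interpret_flags_sketch('4,8'))    # 12
print(interpret_flags_sketch('~4+8'))   # -13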
java | public static <T> JSONObject getJsonFromObject(T t)
throws IllegalAccessException, InstantiationException, InvocationTargetException {
Field[] fields = AnnotationUtils.filterDeepFields(t.getClass());
JSONObject json = new JSONObject();
for (Field field : fields) {
Method method = Utils.findGetter(field.getName(), t.getClass());
Object object = method.invoke(t);
if (object != null) {
if (Collection.class.isAssignableFrom(field.getType())) {
Collection c = (Collection) object;
Iterator iterator = c.iterator();
List<JSONObject> innerJsonList = new ArrayList<>();
while (iterator.hasNext()) {
innerJsonList.add(getJsonFromObject((IDeepType) iterator.next()));
}
json.put(AnnotationUtils.deepFieldName(field), innerJsonList);
} else if (IDeepType.class.isAssignableFrom(field.getType())) {
json.put(AnnotationUtils.deepFieldName(field), getJsonFromObject((IDeepType) object));
} else {
json.put(AnnotationUtils.deepFieldName(field), object);
}
}
}
return json;
} |
python | def parse_args(cliargs):
"""Parse the command line arguments and return a list of the positional
arguments and a dictionary with the named ones.
>>> parse_args(["abc", "def", "-w", "3", "--foo", "bar", "-narf=zort"])
(['abc', 'def'], {'w': '3', 'foo': 'bar', 'narf': 'zort'})
>>> parse_args(["-abc"])
([], {'abc': True})
>>> parse_args(["-f", "1", "-f", "2", "-f", "3"])
([], {'f': ['1', '2', '3']})
"""
# Split the "key=arg" arguments
largs = []
for arg in cliargs:
if "=" in arg:
key, arg = arg.split("=")
largs.append(key)
largs.append(arg)
args = []
flags = []
kwargs = {}
key = None
for sarg in largs:
if is_key(sarg):
if key is not None:
flags.append(key)
key = sarg.strip("-")
continue
if not key:
args.append(sarg)
continue
value = kwargs.get(key)
if value:
if isinstance(value, list):
value.append(sarg)
else:
value = [value, sarg]
kwargs[key] = value
else:
kwargs[key] = sarg
# Get the flags
if key:
flags.append(key)
    # An extra key without a value is a flag if it hasn't been used before.
    # Otherwise it is a typo.
for flag in flags:
if not kwargs.get(flag):
kwargs[flag] = True
return args, kwargs |
python | def combine_duplicate_stmts(stmts):
"""Combine evidence from duplicate Statements.
Statements are deemed to be duplicates if they have the same key
returned by the `matches_key()` method of the Statement class. This
generally means that statements must be identical in terms of their
arguments and can differ only in their associated `Evidence` objects.
This function keeps the first instance of each set of duplicate
statements and merges the lists of Evidence from all of the other
statements.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Set of statements to de-duplicate.
Returns
-------
list of :py:class:`indra.statements.Statement`
Unique statements with accumulated evidence across duplicates.
Examples
--------
De-duplicate and combine evidence for two statements differing only
in their evidence lists:
>>> map2k1 = Agent('MAP2K1')
>>> mapk1 = Agent('MAPK1')
>>> stmt1 = Phosphorylation(map2k1, mapk1, 'T', '185',
... evidence=[Evidence(text='evidence 1')])
>>> stmt2 = Phosphorylation(map2k1, mapk1, 'T', '185',
... evidence=[Evidence(text='evidence 2')])
>>> uniq_stmts = Preassembler.combine_duplicate_stmts([stmt1, stmt2])
>>> uniq_stmts
[Phosphorylation(MAP2K1(), MAPK1(), T, 185)]
>>> sorted([e.text for e in uniq_stmts[0].evidence]) # doctest:+IGNORE_UNICODE
['evidence 1', 'evidence 2']
"""
# Helper function to get a list of evidence matches keys
def _ev_keys(sts):
ev_keys = []
for stmt in sts:
for ev in stmt.evidence:
ev_keys.append(ev.matches_key())
return ev_keys
# Iterate over groups of duplicate statements
unique_stmts = []
for _, duplicates in Preassembler._get_stmt_matching_groups(stmts):
ev_keys = set()
# Get the first statement and add the evidence of all subsequent
# Statements to it
duplicates = list(duplicates)
start_ev_keys = _ev_keys(duplicates)
for stmt_ix, stmt in enumerate(duplicates):
            if stmt_ix == 0:
new_stmt = stmt.make_generic_copy()
if len(duplicates) == 1:
new_stmt.uuid = stmt.uuid
raw_text = [None if ag is None else ag.db_refs.get('TEXT')
for ag in stmt.agent_list(deep_sorted=True)]
raw_grounding = [None if ag is None else ag.db_refs
for ag in stmt.agent_list(deep_sorted=True)]
for ev in stmt.evidence:
ev_key = ev.matches_key() + str(raw_text) + \
str(raw_grounding)
if ev_key not in ev_keys:
# In case there are already agents annotations, we
# just add a new key for raw_text, otherwise create
# a new key
if 'agents' in ev.annotations:
ev.annotations['agents']['raw_text'] = raw_text
ev.annotations['agents']['raw_grounding'] = \
raw_grounding
else:
ev.annotations['agents'] = \
{'raw_text': raw_text,
'raw_grounding': raw_grounding}
if 'prior_uuids' not in ev.annotations:
ev.annotations['prior_uuids'] = []
ev.annotations['prior_uuids'].append(stmt.uuid)
new_stmt.evidence.append(ev)
ev_keys.add(ev_key)
end_ev_keys = _ev_keys([new_stmt])
if len(end_ev_keys) != len(start_ev_keys):
logger.debug('%d redundant evidences eliminated.' %
(len(start_ev_keys) - len(end_ev_keys)))
# This should never be None or anything else
assert isinstance(new_stmt, Statement)
unique_stmts.append(new_stmt)
return unique_stmts |
java | public void resetResult(Object result)
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(tc, "resetResult");
if (result != null)
((TopicAclTraversalResults) result).reset();
// ((ArrayList) result).clear();
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(tc, "resetResult");
} |
python | def send(mail, server='localhost'):
"""
Sends the given mail.
:type mail: Mail
:param mail: The mail object.
:type server: string
:param server: The address of the mailserver.
"""
sender = mail.get_sender()
rcpt = mail.get_receipients()
session = smtplib.SMTP(server)
message = MIMEMultipart()
message['Subject'] = mail.get_subject()
message['From'] = mail.get_sender()
message['To'] = ', '.join(mail.get_to())
message['Cc'] = ', '.join(mail.get_cc())
message.preamble = 'Your mail client is not MIME aware.'
body = MIMEText(mail.get_body().encode("utf-8"), "plain", "utf-8")
body.add_header('Content-Disposition', 'inline')
message.attach(body)
for filename in mail.get_attachments():
message.attach(_get_mime_object(filename))
session.sendmail(sender, rcpt, message.as_string()) |
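A standalone illustration of the same smtplib/MIME pattern, independent of the Mail class above (addresses and server are placeholders):

import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

message = MIMEMultipart()
message['Subject'] = 'Test'
message['From'] = 'sender@example.com'
message['To'] = 'rcpt@example.com'
message.attach(MIMEText('Hello', 'plain', 'utf-8'))
with smtplib.SMTP('localhost') as session:
    session.sendmail(message['From'], [message['To']], message.as_string())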
python | def __get_condition(self, url):
"""
Gets the condition for a url and validates it.
:param str url: The url to get the condition for
"""
if self.__heuristics_condition is not None:
return self.__heuristics_condition
if "pass_heuristics_condition" in self.__sites_object[url]:
condition = \
self.__sites_object[url]["pass_heuristics_condition"]
else:
condition = \
self.cfg_heuristics["pass_heuristics_condition"]
# Because the condition will be eval-ed (Yeah, eval is evil, BUT only
# when not filtered properly), we are filtering it here.
# Anyway, if that filter-method is not perfect: This is not any
        # random user-input that's evaled. This is (hopefully still when you
# read this) not a webtool, where you need to filter everything 100%
# properly.
disalloweds = condition
heuristics = self.__get_enabled_heuristics(url)
for allowed in self.__condition_allowed:
disalloweds = disalloweds.replace(allowed, " ")
for heuristic, _ in heuristics.items():
disalloweds = re.sub(r"\b%s\b" % heuristic, " ", disalloweds)
disalloweds = disalloweds.split(" ")
for disallowed in disalloweds:
if disallowed != "":
self.log.error("Misconfiguration: In the condition,"
" an unknown heuristic was found and"
" will be ignored: %s", disallowed)
condition = re.sub(r"\b%s\b" % disallowed, "True", condition)
self.__heuristics_condition = condition
# Now condition should just consits of not, and, or, (, ), and all
# enabled heuristics.
return condition |
python | def _comparable(self):
"""Get a comparable version of the DatasetID.
Without this DatasetIDs often raise an exception when compared in
Python 3 due to None not being comparable with other types.
"""
return self._replace(
name='' if self.name is None else self.name,
wavelength=tuple() if self.wavelength is None else self.wavelength,
resolution=0 if self.resolution is None else self.resolution,
polarization='' if self.polarization is None else self.polarization,
calibration='' if self.calibration is None else self.calibration,
) |
python | def enbase64(byte_str):
"""
Encode bytes/strings to base64.
Args:
- ``byte_str``: The string or bytes to base64 encode.
Returns:
- byte_str encoded as base64.
"""
# Python 3: base64.b64encode() expects type byte
if isinstance(byte_str, str) and not PYTHON2:
byte_str = bytes(byte_str, 'utf-8')
return base64.b64encode(byte_str) |
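Example round-trip under Python 3, assuming enbase64 is importable in scope:

import base64

encoded = enbase64('hello')                   # b'aGVsbG8='
assert base64.b64decode(encoded) == b'hello'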
java | static String unescape(final String text, final UriEscapeType escapeType, final String encoding) {
if (text == null) {
return null;
}
StringBuilder strBuilder = null;
final int offset = 0;
final int max = text.length();
int readOffset = offset;
for (int i = offset; i < max; i++) {
final char c = text.charAt(i);
/*
* Check the need for an unescape operation at this point
*/
if (c != ESCAPE_PREFIX && (c != '+' || !escapeType.canPlusEscapeWhitespace())) {
continue;
}
/*
* At this point we know for sure we will need some kind of unescape, so we
* can increase the offset and initialize the string builder if needed, along with
* copying to it all the contents pending up to this point.
*/
if (strBuilder == null) {
strBuilder = new StringBuilder(max + 5);
}
if (i - readOffset > 0) {
strBuilder.append(text, readOffset, i);
}
/*
* Deal with possible '+'-escaped whitespace (application/x-www-form-urlencoded)
*/
if (c == '+') {
// if we reached this point with c == '+', it's escaping a whitespace
strBuilder.append(' ');
readOffset = i + 1;
continue;
}
/*
* ESCAPE PROCESS
* --------------
* If there are more than one percent-encoded/escaped sequences together, we will
* need to unescape them all at once (because they might be bytes --up to 4-- of
* the same char).
*/
// Max possible size will be the remaining amount of chars / 3
final byte[] bytes = new byte[(max-i)/3];
char aheadC = c;
int pos = 0;
while (((i + 2) < max) && aheadC == ESCAPE_PREFIX) {
bytes[pos++] = parseHexa(text.charAt(i + 1), text.charAt(i + 2));
i += 3;
if (i < max) {
aheadC = text.charAt(i);
}
}
if (i < max && aheadC == ESCAPE_PREFIX) {
// Incomplete escape sequence!
throw new IllegalArgumentException("Incomplete escaping sequence in input");
}
try {
strBuilder.append(new String(bytes, 0, pos, encoding));
} catch (final UnsupportedEncodingException e) {
throw new IllegalArgumentException("Exception while escaping URI: Bad encoding '" + encoding + "'", e);
}
readOffset = i;
}
/*
* -----------------------------------------------------------------------------------------------
* Final cleaning: return the original String object if no unescape was actually needed. Otherwise
* append the remaining escaped text to the string builder and return.
* -----------------------------------------------------------------------------------------------
*/
if (strBuilder == null) {
return text;
}
if (max - readOffset > 0) {
strBuilder.append(text, readOffset, max);
}
return strBuilder.toString();
} |
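For comparison, the same unescaping behaviour (percent-decoding with optional '+'-as-space handling) is available in Python's standard library:

from urllib.parse import unquote, unquote_plus

print(unquote_plus('a+b%C3%A9'))   # 'a bé'  (query-string style, '+' is a space)
print(unquote('a+b%C3%A9'))        # 'a+bé'  (path style, '+' is kept)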
java | protected static <P extends ParaObject> void batchGet(Map<String, KeysAndAttributes> kna, Map<String, P> results) {
if (kna == null || kna.isEmpty() || results == null) {
return;
}
try {
BatchGetItemResult result = getClient().batchGetItem(new BatchGetItemRequest().
withReturnConsumedCapacity(ReturnConsumedCapacity.TOTAL).withRequestItems(kna));
if (result == null) {
return;
}
List<Map<String, AttributeValue>> res = result.getResponses().get(kna.keySet().iterator().next());
for (Map<String, AttributeValue> item : res) {
P obj = fromRow(item);
if (obj != null) {
results.put(obj.getId(), obj);
}
}
logger.debug("batchGet(): total {}, cc {}", res.size(), result.getConsumedCapacity());
if (result.getUnprocessedKeys() != null && !result.getUnprocessedKeys().isEmpty()) {
Thread.sleep(1000);
logger.warn("{} UNPROCESSED read requests!", result.getUnprocessedKeys().size());
batchGet(result.getUnprocessedKeys(), results);
}
} catch (Exception e) {
logger.error(null, e);
}
} |
java | public static List<Field> getFirstLevelOfReferenceAttributes(Class<?> clazz) {
List<Field> references = new ArrayList<Field>();
List<String> referencedFields = ReflectionUtils.getReferencedAttributeNames(clazz);
for(String eachReference : referencedFields) {
Field referenceField = ReflectionUtils.getField(clazz, eachReference);
references.add(referenceField);
}
return references;
} |
java | @Override
public synchronized List<RecordContext> getDataForContext (int contextId) throws DatabaseException {
try {
List<RecordContext> result = new ArrayList<>();
psGetAllDataForContext.setInt(1, contextId);
try (ResultSet rs = psGetAllDataForContext.executeQuery()) {
while (rs.next()) {
result.add(new RecordContext(rs.getLong(DATAID), rs.getInt(CONTEXTID), rs.getInt(TYPE), rs.getString(DATA)));
}
}
return result;
} catch (SQLException e) {
throw new DatabaseException(e);
}
} |
python | def add_relationship(
self,
entity1_ilx: str,
relationship_ilx: str,
entity2_ilx: str) -> dict:
""" Adds relationship connection in Interlex
A relationship exists as 3 different parts:
1. entity with type term, cde, fde, or pde
2. entity with type relationship that connects entity1 to entity2
            -> Has its own metadata, so no value is needed
3. entity with type term, cde, fde, or pde
"""
url = self.base_url + 'term/add-relationship'
entity1_data = self.get_entity(entity1_ilx)
if not entity1_data['id']:
exit(
'entity1_ilx: ' + entity1_ilx + ' does not exist'
)
relationship_data = self.get_entity(relationship_ilx)
if not relationship_data['id']:
exit(
'relationship_ilx: ' + relationship_ilx + ' does not exist'
)
entity2_data = self.get_entity(entity2_ilx)
if not entity2_data['id']:
exit(
'entity2_ilx: ' + entity2_ilx + ' does not exist'
)
data = {
'term1_id': entity1_data['id'],
'relationship_tid': relationship_data['id'],
'term2_id': entity2_data['id'],
'term1_version': entity1_data['version'],
'term2_version': entity2_data['version'],
'relationship_term_version': relationship_data['version'],
'orig_uid': self.user_id, # BUG: php lacks orig_uid update
}
output = self.post(
url = url,
data = data,
)
### If already exists, we return the actual relationship properly ###
if output.get('errormsg'):
if 'already exists' in output['errormsg'].lower():
term_relationships = self.get_relationship_via_tid(entity1_data['id'])
for term_relationship in term_relationships:
if str(term_relationship['term2_id']) == str(entity2_data['id']):
if term_relationship['relationship_tid'] == relationship_data['id']:
print(
'relationship: [' + entity1_data['label'] + ' -> ' +
relationship_data['label'] + ' -> ' + entity2_data['label'] +
'], already exists.'
)
return term_relationship
exit(output)
exit(output)
return output |
python | def hypot(x, y, context=None):
"""
Return the Euclidean norm of x and y, i.e., the square root of the sum of
the squares of x and y.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_hypot,
(
BigFloat._implicit_convert(x),
BigFloat._implicit_convert(y),
),
context,
) |
python | def reparse_login_cookie_after_region_update(self, login_response):
"""
Sometimes, login cookie gets sent with region info instead of api.cloudgenix.com. This function
re-parses the original login request and applies cookies to the session if they now match the new region.
**Parameters:**
- **login_response:** requests.Response from a non-region login.
**Returns:** updates API() object directly, no return.
"""
login_url = login_response.request.url
api_logger.debug("ORIGINAL REQUEST URL = %s", login_url)
# replace old controller with new controller.
login_url_new = login_url.replace(self.controller_orig, self.controller)
api_logger.debug("UPDATED REQUEST URL = %s", login_url_new)
# reset login url with new region
login_response.request.url = login_url_new
# prep cookie jar parsing
req = requests.cookies.MockRequest(login_response.request)
res = requests.cookies.MockResponse(login_response.raw._original_response.msg)
# extract cookies to session cookie jar.
self._session.cookies.extract_cookies(res, req)
return |
python | def parse_args(self, *args, **kwargs):
"""Parse the arguments as usual, then add default processing."""
if _debug: ConfigArgumentParser._debug("parse_args")
# pass along to the parent class
result_args = ArgumentParser.parse_args(self, *args, **kwargs)
# read in the configuration file
config = _ConfigParser()
config.read(result_args.ini)
if _debug: _log.debug(" - config: %r", config)
# check for BACpypes section
if not config.has_section('BACpypes'):
raise RuntimeError("INI file with BACpypes section required")
# convert the contents to an object
ini_obj = type('ini', (object,), dict(config.items('BACpypes')))
if _debug: _log.debug(" - ini_obj: %r", ini_obj)
# add the object to the parsed arguments
setattr(result_args, 'ini', ini_obj)
# return what was parsed
return result_args |
java | private JpaSoftwareModule touch(final SoftwareModule latestModule) {
// merge base distribution set so optLockRevision gets updated and audit
// log written because modifying metadata is modifying the base
// distribution set itself for auditing purposes.
final JpaSoftwareModule result = entityManager.merge((JpaSoftwareModule) latestModule);
result.setLastModifiedAt(0L);
return result;
} |
java | public void handle(final HttpExchange pHttpExchange) throws IOException {
try {
checkAuthentication(pHttpExchange);
Subject subject = (Subject) pHttpExchange.getAttribute(ConfigKey.JAAS_SUBJECT_REQUEST_ATTRIBUTE);
if (subject != null) {
doHandleAs(subject, pHttpExchange);
} else {
doHandle(pHttpExchange);
}
} catch (SecurityException exp) {
sendForbidden(pHttpExchange,exp);
}
} |
java | public static Map<String,List<String>> findImages( File rootDir ) {
File files[] = rootDir.listFiles();
if( files == null )
return null;
List<File> imageDirectories = new ArrayList<>();
for( File f : files ) {
if( f.isDirectory() ) {
imageDirectories.add(f);
}
}
Map<String,List<String>> out = new HashMap<>();
for( File d : imageDirectories ) {
List<String> images = new ArrayList<>();
files = d.listFiles();
if( files == null )
throw new RuntimeException("Should be a directory!");
for( File f : files ) {
if( f.isHidden() || f.isDirectory() || f.getName().endsWith(".txt") ) {
continue;
}
images.add( f.getPath() );
}
String key = d.getName().toLowerCase();
out.put(key,images);
}
return out;
} |
python | def startLoop():
"""
Use nested asyncio event loop for Jupyter notebooks.
"""
def _ipython_loop_asyncio(kernel):
'''
Use asyncio event loop for the given IPython kernel.
'''
loop = asyncio.get_event_loop()
def kernel_handler():
kernel.do_one_iteration()
loop.call_later(kernel._poll_interval, kernel_handler)
loop.call_soon(kernel_handler)
try:
if not loop.is_running():
loop.run_forever()
finally:
if not loop.is_running():
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
patchAsyncio()
loop = asyncio.get_event_loop()
if not loop.is_running():
from ipykernel.eventloops import register_integration, enable_gui
register_integration('asyncio')(_ipython_loop_asyncio)
enable_gui('asyncio') |
java | public DefaultShardManagerBuilder setGatewayPool(ScheduledExecutorService pool, boolean automaticShutdown)
{
return setGatewayPoolProvider(pool == null ? null : new ThreadPoolProviderImpl<>(pool, automaticShutdown));
} |
java | public static void reportOpenTransactions(String scope, String streamName, int ongoingTransactions) {
DYNAMIC_LOGGER.reportGaugeValue(OPEN_TRANSACTIONS, ongoingTransactions, streamTags(scope, streamName));
} |
python | def float2int(x):
"""
    converts a float to an int when float() alone is not enough.
:param x: float
"""
if not pd.isnull(x):
if is_numeric(x):
            x = int(x)
return x |
python | def by_classifiers(cls, session, classifiers):
"""
Get releases for given classifiers.
:param session: SQLAlchemy session
:type session: :class:`sqlalchemy.Session`
:param classifiers: classifiers
:type classifiers: unicode
:return: release instances
:rtype: generator of :class:`pyshop.models.Release`
"""
return cls.find(session,
join=(cls.classifiers,),
where=(Classifier.name.in_(classifiers),),
) |
python | def execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
"""
Overrides the parent method to add log messages.
:param logger: the logger to use during parsing (optional: None is supported)
:param options:
:return:
"""
in_root_call = False
if logger is not None:
# log only for the root object, not for the children that will be created by the code below
if not hasattr(_BaseParsingPlan.thrd_locals, 'flag_exec') \
or _BaseParsingPlan.thrd_locals.flag_exec == 0:
# print('Executing Parsing Plan for ' + str(self))
logger.debug('Executing Parsing Plan for [{location}]'
''.format(location=self.obj_on_fs_to_parse.get_pretty_location(append_file_ext=False)))
_BaseParsingPlan.thrd_locals.flag_exec = 1
in_root_call = True
# Common log message
logger.debug('(P) ' + get_parsing_plan_log_str(self.obj_on_fs_to_parse, self.obj_type,
log_only_last=not in_root_call, parser=self.parser))
try:
res = super(_BaseParsingPlan, self).execute(logger, options)
if logger.isEnabledFor(DEBUG):
logger.info('(P) {loc} -> {type} SUCCESS !'
''.format(loc=self.obj_on_fs_to_parse.get_pretty_location(
blank_parent_part=not GLOBAL_CONFIG.full_paths_in_logs,
compact_file_ext=True),
type=get_pretty_type_str(self.obj_type)))
else:
logger.info('SUCCESS parsed [{loc}] as a [{type}] successfully. Parser used was [{parser}]'
''.format(loc=self.obj_on_fs_to_parse.get_pretty_location(compact_file_ext=True),
type=get_pretty_type_str(self.obj_type),
parser=str(self.parser)))
if in_root_call:
# print('Completed parsing successfully')
logger.debug('Completed parsing successfully')
return res
finally:
# remove threadlocal flag if needed
if in_root_call:
_BaseParsingPlan.thrd_locals.flag_exec = 0 |
java | public static void uncompressDirectory(File srcZipPath, File dstDirPath) throws IOException
{
ZipInputStream in = new ZipInputStream(new FileInputStream(srcZipPath));
ZipEntry entry = null;
try
{
while ((entry = in.getNextEntry()) != null)
{
File dstFile = new File(dstDirPath, entry.getName());
dstFile.getParentFile().mkdirs();
if (entry.isDirectory())
{
dstFile.mkdirs();
}
else
{
OutputStream out = new FileOutputStream(dstFile);
try
{
transfer(in, out);
}
finally
{
out.close();
}
}
}
}
finally
{
if (in != null)
{
in.close();
}
}
} |
java | public PooledObject<T> get() {
T data = pool.pollLast();
if (data == null) {
data = function.get();
}
return new PooledObjectImpl<>(data, pool);
} |
python | def clusters(l, K):
"""Partition list ``l`` in ``K`` partitions.
>>> l = [0, 1, 2]
>>> list(clusters(l, K=3))
[[[0], [1], [2]], [[], [0, 1], [2]], [[], [1], [0, 2]], [[0], [], [1, 2]], [[], [0], [1, 2]], [[], [], [0, 1, 2]]]
>>> list(clusters(l, K=2))
[[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]], [[], [0, 1, 2]]]
>>> list(clusters(l, K=1))
[[[0, 1, 2]]]
"""
if l:
prev = None
for t in clusters(l[1:], K):
tup = sorted(t)
if tup != prev:
prev = tup
for i in range(K):
yield tup[:i] + [[l[0]] + tup[i], ] + tup[i+1:]
else:
yield [[] for _ in range(K)] |
python | def _normalize_sv_coverage_gatk(group_id, inputs, backgrounds, work_dir, back_files, out_files):
"""Normalize CNV coverage using panel of normals with GATK's de-noise approaches.
"""
input_backs = set(filter(lambda x: x is not None,
[dd.get_background_cnv_reference(d, "gatk-cnv") for d in inputs]))
if input_backs:
assert len(input_backs) == 1, "Multiple backgrounds in group: %s" % list(input_backs)
pon = list(input_backs)[0]
elif backgrounds:
pon = gatkcnv.create_panel_of_normals(backgrounds, group_id, work_dir)
else:
pon = None
for data in inputs:
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
dd.get_sample_name(data), "bins"))
denoise_file = gatkcnv.denoise(data, pon, work_dir)
out_files[dd.get_sample_name(data)] = denoise_file
back_files[dd.get_sample_name(data)] = pon
return back_files, out_files |
java | @Override
public boolean isTrailersReady() {
if (!message.isChunkedEncodingSet()
|| !message.containsHeader(HttpHeaderKeys.HDR_TRAILER)
|| ((HttpBaseMessageImpl) message).getTrailersImpl() != null
|| (message.getVersionValue().getMajor() <= 1 && message.getVersionValue().getMinor() < 1))
return true;
return false;
} |
java | public void marshall(ImportCertificateRequest importCertificateRequest, ProtocolMarshaller protocolMarshaller) {
if (importCertificateRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(importCertificateRequest.getCertificateArn(), CERTIFICATEARN_BINDING);
protocolMarshaller.marshall(importCertificateRequest.getCertificate(), CERTIFICATE_BINDING);
protocolMarshaller.marshall(importCertificateRequest.getPrivateKey(), PRIVATEKEY_BINDING);
protocolMarshaller.marshall(importCertificateRequest.getCertificateChain(), CERTIFICATECHAIN_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public static String byteRegexToString(final String regexp) {
final StringBuilder buf = new StringBuilder();
for (int i = 0; i < regexp.length(); i++) {
if (i > 0 && regexp.charAt(i - 1) == 'Q') {
if (regexp.charAt(i - 3) == '*') {
// tagk
byte[] tagk = new byte[TSDB.tagk_width()];
for (int x = 0; x < TSDB.tagk_width(); x++) {
tagk[x] = (byte)regexp.charAt(i + x);
}
i += TSDB.tagk_width();
buf.append(Arrays.toString(tagk));
} else {
// tagv
byte[] tagv = new byte[TSDB.tagv_width()];
for (int x = 0; x < TSDB.tagv_width(); x++) {
tagv[x] = (byte)regexp.charAt(i + x);
}
i += TSDB.tagv_width();
buf.append(Arrays.toString(tagv));
}
} else {
buf.append(regexp.charAt(i));
}
}
return buf.toString();
} |
java | public static AtomContactSet getAtomsInContact(Chain chain, double cutoff) {
return getAtomsInContact(chain, (String[]) null, cutoff);
} |
python | def main():
'''Main routine.'''
# validate command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--vmname', '-n', required=True, action='store', help='Name')
arg_parser.add_argument('--rgname', '-g', required=True, action='store',
help='Resource Group Name')
arg_parser.add_argument('--user', '-u', required=False, action='store', default='azure',
help='Optional username')
arg_parser.add_argument('--password', '-p', required=False, action='store',
help='Optional password')
arg_parser.add_argument('--sshkey', '-k', required=False, action='store',
help='SSH public key')
arg_parser.add_argument('--sshpath', '-s', required=False, action='store',
help='SSH public key file path')
arg_parser.add_argument('--location', '-l', required=False, action='store',
help='Location, e.g. eastus')
arg_parser.add_argument('--vmsize', required=False, action='store', default='Standard_D1_V2',
help='VM size, defaults to Standard_D1_V2')
arg_parser.add_argument('--dns', '-d', required=False, action='store',
help='DNS, e.g. myuniquename')
arg_parser.add_argument('--vnet', required=False, action='store',
help='Optional VNET Name (else first VNET in resource group used)')
arg_parser.add_argument('--nowait', action='store_true', default=False,
help='Do not wait for VM to finish provisioning')
arg_parser.add_argument('--nonsg', action='store_true', default=False,
help='Do not create a network security group on the NIC')
arg_parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Print operational details')
args = arg_parser.parse_args()
name = args.vmname
rgname = args.rgname
vnet = args.vnet
location = args.location
username = args.user
password = args.password
sshkey = args.sshkey
sshpath = args.sshpath
verbose = args.verbose
dns_label = args.dns
no_wait = args.nowait
no_nsg = args.nonsg
vmsize = args.vmsize
# make sure all authentication scenarios are handled
if sshkey is not None and sshpath is not None:
sys.exit('Error: You can provide an SSH public key, or a public key file path, not both.')
if password is not None and (sshkey is not None or sshpath is not None):
sys.exit('Error: provide a password or SSH key (or nothing), not both')
use_password = False
if password is not None:
use_password = True
else:
if sshkey is None and sshpath is None: # no auth parameters were provided
# look for ~/id_rsa.pub
home = os.path.expanduser('~')
sshpath = home + os.sep + '.ssh' + os.sep + 'id_rsa.pub'
if os.path.isfile(sshpath) is False:
print('Default public key file not found.')
use_password = True
password = Haikunator().haikunate(delimiter=',') # creates random password
print('Created new password = ' + password)
else:
print('Default public key file found')
if use_password is False:
print('Reading public key..')
if sshkey is None:
            # at this point sshpath should have a valid value
with open(sshpath, 'r') as pub_ssh_file_fd:
sshkey = pub_ssh_file_fd.read()
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit("Error: Expecting azurermconfig.json in current folder")
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# if no location parameter was specified now would be a good time to figure out the location
if location is None:
try:
rgroup = azurerm.get_resource_group(access_token, subscription_id, rgname)
location = rgroup['location']
except KeyError:
print('Cannot find resource group ' + rgname + '. Check connection/authorization.')
print(json.dumps(rgroup, sort_keys=False, indent=2, separators=(',', ': ')))
sys.exit()
print('location = ' + location)
# get VNET
print('Getting VNet')
vnet_not_found = False
if vnet is None:
print('VNet not set, checking resource group')
# get first VNET in resource group
try:
vnets = azurerm.list_vnets_rg(access_token, subscription_id, rgname)
# print(json.dumps(vnets, sort_keys=False, indent=2, separators=(',', ': ')))
vnetresource = vnets['value'][0]
except IndexError:
print('No VNET found in resource group.')
vnet_not_found = True
vnet = name + 'vnet'
else:
print('Getting VNet: ' + vnet)
vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet)
if 'properties' not in vnetresource:
print('VNet ' + vnet + ' not found in resource group ' + rgname)
vnet_not_found = True
if vnet_not_found is True:
# create a vnet
print('Creating vnet: ' + vnet)
rmresource = azurerm.create_vnet(access_token, subscription_id, rgname, vnet, location, \
address_prefix='10.0.0.0/16', nsg_id=None)
if rmresource.status_code != 201:
            print('Error ' + str(rmresource.status_code) + ' creating VNET. ' + rmresource.text)
sys.exit()
vnetresource = azurerm.get_vnet(access_token, subscription_id, rgname, vnet)
try:
subnet_id = vnetresource['properties']['subnets'][0]['id']
except KeyError:
print('Subnet not found for VNet ' + vnet)
sys.exit()
if verbose is True:
print('subnet_id = ' + subnet_id)
public_ip_name = name + 'ip'
if dns_label is None:
dns_label = name + 'dns'
print('Creating public ipaddr')
rmreturn = azurerm.create_public_ip(access_token, subscription_id, rgname, public_ip_name,
dns_label, location)
if rmreturn.status_code not in [200, 201]:
print(rmreturn.text)
sys.exit('Error: ' + str(rmreturn.status_code) + ' from azurerm.create_public_ip()')
ip_id = rmreturn.json()['id']
if verbose is True:
print('ip_id = ' + ip_id)
print('Waiting for IP provisioning..')
waiting = True
while waiting:
pip = azurerm.get_public_ip(access_token, subscription_id, rgname, public_ip_name)
if pip['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(1)
if no_nsg is True:
nsg_id = None
else:
# create NSG
nsg_name = name + 'nsg'
print('Creating NSG: ' + nsg_name)
rmreturn = azurerm.create_nsg(access_token, subscription_id, rgname, nsg_name, location)
if rmreturn.status_code not in [200, 201]:
print('Error ' + str(rmreturn.status_code) + ' creating NSG. ' + rmreturn.text)
sys.exit()
nsg_id = rmreturn.json()['id']
# create NSG rule for ssh, scp
nsg_rule = 'ssh'
print('Creating NSG rule: ' + nsg_rule)
rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, rgname, nsg_name,
nsg_rule, description='ssh rule',
destination_range='22')
if rmreturn.status_code not in [200, 201]:
print('Error ' + str(rmreturn.status_code) + ' creating NSG rule. ' + rmreturn.text)
sys.exit()
# create NIC
nic_name = name + 'nic'
print('Creating NIC: ' + nic_name)
rmreturn = azurerm.create_nic(access_token, subscription_id, rgname, nic_name, ip_id,
subnet_id, location, nsg_id=nsg_id)
if rmreturn.status_code not in [200, 201]:
        print('Error ' + str(rmreturn.status_code) + ' creating NIC. ' + rmreturn.text)
sys.exit()
nic_id = rmreturn.json()['id']
print('Waiting for NIC provisioning..')
waiting = True
while waiting:
nic = azurerm.get_nic(access_token, subscription_id, rgname, nic_name)
if nic['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(1)
# create VM
vm_name = name
#publisher = 'CoreOS'
#offer = 'CoreOS'
#sku = 'Stable'
publisher = 'Canonical'
offer = 'UbuntuServer'
sku = '16.04-LTS'
version = 'latest'
print('Creating VM: ' + vm_name)
if use_password is True:
rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize,
publisher, offer, sku, version, nic_id, location,
username=username, password=password)
else:
rmreturn = azurerm.create_vm(access_token, subscription_id, rgname, vm_name, vmsize,
publisher, offer, sku, version, nic_id, location,
username=username, public_key=sshkey)
if rmreturn.status_code != 201:
        sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text)
if no_wait is False:
print('Waiting for VM provisioning..')
waiting = True
while waiting:
vm_model = azurerm.get_vm(access_token, subscription_id, rgname, vm_name)
if vm_model['properties']['provisioningState'] == 'Succeeded':
waiting = False
time.sleep(5)
print('VM provisioning complete.')
print('Connect with:')
print('ssh ' + dns_label + '.' + location + '.cloudapp.azure.com -l ' + username) |
python | def to_sdf(self):
""" Converts the 2D image to a 2D signed distance field.
Returns
-------
:obj:`numpy.ndarray`
2D float array of the signed distance field
"""
# compute medial axis transform
skel, sdf_in = morph.medial_axis(self.data, return_distance=True)
useless_skel, sdf_out = morph.medial_axis(
np.iinfo(np.uint8).max - self.data, return_distance=True)
# convert to true sdf
sdf = sdf_out - sdf_in
return sdf |
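A quick check of the sign convention used above on a synthetic image (distances come out negative inside the foreground, positive outside):

import numpy as np
from skimage import morphology as morph

img = np.zeros((32, 32), dtype=np.uint8)
img[8:24, 8:24] = 255
_, d_in = morph.medial_axis(img, return_distance=True)
_, d_out = morph.medial_axis(255 - img, return_distance=True)
sdf = d_out - d_in
print(sdf[16, 16] < 0, sdf[0, 0] > 0)  # True True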
python | def make_assertions(input_pipe, other_pipes, output_pipe):
"""
To assure that the pipe is correctly settled
:param input_pipe:
:param other_pipes: can be []
:param output_pipe:
:return:
"""
assert isinstance(input_pipe, elements.InPypElement), 'Wrong input element type, want a InPypElement!'
assert isinstance(output_pipe, elements.OutPypElement), 'Wrong output element type, want a OutPypElement!'
for other_pipe in other_pipes:
assert isinstance(other_pipe, elements.MidPypElement), 'Wrong middle element type, want a MidPypElement!' |
python | def conditions(self):
"""
conditions ::= condition | condition logical_binary_op conditions
Note: By default lpar and rpar arguments are suppressed.
"""
return operatorPrecedence(
baseExpr=self.condition,
opList=[(self.not_op, 1, opAssoc.RIGHT),
(self.logical_binary_op, 2, opAssoc.LEFT)],
lpar=self.syntax.paren_left,
rpar=self.syntax.paren_right) |
python | def _transition_stage(self, step, total_steps, brightness=None):
"""
Get a transition stage at a specific step.
:param step: The current step.
:param total_steps: The total number of steps.
:param brightness: The brightness to transition to (0.0-1.0).
:return: The stage at the specific step.
"""
if brightness is not None:
self._assert_is_brightness(brightness)
brightness = self._interpolate(self.brightness, brightness,
step, total_steps)
return {'brightness': brightness} |
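The `_interpolate` helper is not shown; a plausible linear interpolation matching how it is used above (illustrative only) would be:

def interpolate(start, end, step, total_steps):
    """Linear interpolation from start to end over total_steps."""
    if total_steps <= 0:
        return end
    return start + (end - start) * (step / total_steps)

print(interpolate(0.2, 1.0, 5, 10))  # 0.6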
python | def generate_ticket(name, output=None, grain=None, key=None, overwrite=True):
'''
Generate an icinga2 ticket on the master.
name
The domain name for which this ticket will be generated
output
grain: output in a grain
other: the file to store results
None: output to the result comment (default)
grain:
grain to store the output (need output=grain)
key:
the specified grain will be treated as a dictionary, the result
of this state will be stored under the specified key.
overwrite:
The file or grain will be overwritten if it already exists (default)
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
# Checking if execution is needed.
if output == 'grain':
if grain and not key:
if not overwrite and grain in __salt__['grains.ls']():
ret['comment'] = 'No execution needed. Grain {0} already set'.format(grain)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}'.format(grain)
return ret
elif grain:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
if not overwrite and key in grain_value:
ret['comment'] = 'No execution needed. Grain {0}:{1} already set'.format(grain, key)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}:{1}'.format(grain, key)
return ret
else:
ret['result'] = False
ret['comment'] = "Error: output type 'grain' needs the grain parameter\n"
return ret
elif output:
if not overwrite and os.path.isfile(output):
ret['comment'] = 'No execution needed. File {0} already set'.format(output)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, storing result in file: {0}'.format(output)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, not storing result'
return ret
# Executing the command.
ticket_res = __salt__['icinga2.generate_ticket'](name)
ticket = ticket_res['stdout']
if not ticket_res['retcode']:
ret['comment'] = six.text_type(ticket)
if output == 'grain':
if grain and not key:
__salt__['grains.setval'](grain, ticket)
ret['changes']['ticket'] = "Executed. Output into grain: {0}".format(grain)
elif grain:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
grain_value[key] = ticket
__salt__['grains.setval'](grain, grain_value)
ret['changes']['ticket'] = "Executed. Output into grain: {0}:{1}".format(grain, key)
elif output:
ret['changes']['ticket'] = "Executed. Output into {0}".format(output)
with salt.utils.files.fopen(output, 'w') as output_file:
output_file.write(salt.utils.stringutils.to_str(ticket))
else:
ret['changes']['ticket'] = "Executed"
return ret |
python | def _LoadArtifactsFromDatastore(self):
"""Load artifacts from the data store."""
loaded_artifacts = []
# TODO(hanuszczak): Why do we have to remove anything? If some artifact
# tries to shadow system artifact shouldn't we just ignore them and perhaps
# issue some warning instead? The datastore being loaded should be read-only
# during upload.
# A collection of artifacts that shadow system artifacts and need
# to be deleted from the data store.
to_delete = []
for artifact_coll_urn in self._sources.GetDatastores():
artifact_coll = ArtifactCollection(artifact_coll_urn)
if data_store.RelationalDBEnabled():
artifact_list = data_store.REL_DB.ReadAllArtifacts()
else:
artifact_list = list(artifact_coll)
for artifact_value in artifact_list:
try:
self.RegisterArtifact(
artifact_value,
source="datastore:%s" % artifact_coll_urn,
overwrite_if_exists=True)
loaded_artifacts.append(artifact_value)
logging.debug("Loaded artifact %s from %s", artifact_value.name,
artifact_coll_urn)
except rdf_artifacts.ArtifactDefinitionError as e:
# TODO(hanuszczak): String matching on exception message is rarely
          # a good idea. Instead this should be refactored to some exception
# class and then handled separately.
if "system artifact" in str(e):
to_delete.append(artifact_value.name)
else:
raise
if to_delete:
DeleteArtifactsFromDatastore(to_delete, reload_artifacts=False)
self._dirty = True
# TODO(hanuszczak): This is connected to the previous TODO comment. Why
# do we throw exception at this point? Why do we delete something and then
# abort the whole upload procedure by throwing an exception?
detail = "system artifacts were shadowed and had to be deleted"
raise rdf_artifacts.ArtifactDefinitionError(to_delete, detail)
# Once all artifacts are loaded we can validate.
revalidate = True
while revalidate:
revalidate = False
for artifact_obj in loaded_artifacts[:]:
try:
Validate(artifact_obj)
except rdf_artifacts.ArtifactDefinitionError as e:
logging.error("Artifact %s did not validate: %s", artifact_obj.name,
e)
artifact_obj.error_message = utils.SmartStr(e)
loaded_artifacts.remove(artifact_obj)
revalidate = True |
java | public static void hideView(View parentView, int id) {
if (parentView != null) {
View view = parentView.findViewById(id);
if (view != null) {
view.setVisibility(View.GONE);
} else {
Log.e("Caffeine", "View does not exist. Could not hide it.");
}
}
} |
java | private String createString(String f)
{
StringBuilder sb = new StringBuilder();
sb.append("addressMode="+"("+
CUaddress_mode.stringFor(addressMode[0])+","+
CUaddress_mode.stringFor(addressMode[1])+","+
CUaddress_mode.stringFor(addressMode[2])+")"+f);
sb.append("filterMode="+CUfilter_mode.stringFor(filterMode)+f);
String flagsString = "";
if ((flags & JCudaDriver.CU_TRSF_READ_AS_INTEGER) != 0)
{
flagsString += "CU_TRSF_READ_AS_INTEGER";
}
if ((flags & JCudaDriver.CU_TRSF_NORMALIZED_COORDINATES) != 0)
{
flagsString += "CU_TRSF_NORMALIZED_COORDINATES";
}
sb.append("flags="+flags+"("+flagsString+")");
sb.append("maxAnisotropy="+maxAnisotropy);
sb.append("mipmapFilterMode="+CUfilter_mode.stringFor(mipmapFilterMode)+f);
sb.append("mipmapLevelBias="+mipmapLevelBias+f);
sb.append("minMipmapLevelClamp="+minMipmapLevelClamp+f);
sb.append("maxMipmapLevelClamp="+maxMipmapLevelClamp+f);
sb.append("borderColor="+Arrays.toString(borderColor)+f);
return sb.toString();
} |
python | def ip_dns_name_server_name_server_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
dns = ET.SubElement(ip, "dns", xmlns="urn:brocade.com:mgmt:brocade-ip-administration")
name_server = ET.SubElement(dns, "name-server")
name_server_ip = ET.SubElement(name_server, "name-server-ip")
name_server_ip.text = kwargs.pop('name_server_ip')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
java | public static String[] splitPreserveAllTokens(String value, char separatorChar) {
if (value == null) {
return ArrayUtils.EMPTY_STRING_ARRAY;
}
int len = value.length();
if (len == 0) {
return ArrayUtils.EMPTY_STRING_ARRAY;
}
List<String> list = new ArrayList<>();
int i = 0;
int start = 0;
boolean match = false;
boolean lastMatch = false;
int escapeStart = -2;
while (i < len) {
char c = value.charAt(i);
if (c == ESCAPE_CHAR) {
escapeStart = i;
}
if (c == separatorChar && escapeStart != i - 1) {
lastMatch = true;
list.add(value.substring(start, i));
match = false;
start = ++i;
continue;
}
match = true;
i++;
}
if (match || lastMatch) {
list.add(value.substring(start, i));
}
return list.toArray(new String[list.size()]);
} |
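A small Python sketch of the same idea (not a faithful port of the Java helper): split on a separator while keeping separators preceded by the escape character '\':

import re

def split_preserving_escapes(value, sep=','):
    # negative lookbehind: do not split when the separator is escaped
    return re.split(r'(?<!\\)' + re.escape(sep), value)

print(split_preserving_escapes(r'a,b\,c,d'))  # ['a', 'b\\,c', 'd']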
java | public static String toUnicodeHex(char ch) {
StringBuilder sb = new StringBuilder(6);
sb.append("\\u");
sb.append(DIGITS_LOWER[(ch >> 12) & 15]);
sb.append(DIGITS_LOWER[(ch >> 8) & 15]);
sb.append(DIGITS_LOWER[(ch >> 4) & 15]);
sb.append(DIGITS_LOWER[(ch) & 15]);
return sb.toString();
} |
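The Python equivalent of the Java helper above is a one-liner:

def to_unicode_hex(ch):
    """Format a single character as a lowercase \\uXXXX escape."""
    return '\\u{:04x}'.format(ord(ch))

print(to_unicode_hex('ß'))  # \u00df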
python | def axis(origin_size=0.04,
transform=None,
origin_color=None,
axis_radius=None,
axis_length=None):
"""
Return an XYZ axis marker as a Trimesh, which represents position
and orientation. If you set the origin size the other parameters
will be set relative to it.
Parameters
----------
transform : (4, 4) float
Transformation matrix
origin_size : float
Radius of sphere that represents the origin
origin_color : (3,) float or int, uint8 or float
Color of the origin
axis_radius : float
Radius of cylinder that represents x, y, z axis
axis_length: float
Length of cylinder that represents x, y, z axis
Returns
-------
marker : trimesh.Trimesh
Mesh geometry of axis indicators
"""
# the size of the ball representing the origin
origin_size = float(origin_size)
# set the transform and use origin-relative
# sized for other parameters if not specified
if transform is None:
transform = np.eye(4)
if origin_color is None:
origin_color = [255, 255, 255, 255]
if axis_radius is None:
axis_radius = origin_size / 5.0
if axis_length is None:
axis_length = origin_size * 10.0
# generate a ball for the origin
axis_origin = uv_sphere(radius=origin_size,
count=[10, 10])
axis_origin.apply_transform(transform)
# apply color to the origin ball
axis_origin.visual.face_colors = origin_color
# create the cylinder for the z-axis
translation = transformations.translation_matrix(
[0, 0, axis_length / 2])
z_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(translation))
# XYZ->RGB, Z is blue
z_axis.visual.face_colors = [0, 0, 255]
# create the cylinder for the y-axis
translation = transformations.translation_matrix(
[0, 0, axis_length / 2])
rotation = transformations.rotation_matrix(np.radians(-90),
[1, 0, 0])
y_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, Y is green
y_axis.visual.face_colors = [0, 255, 0]
# create the cylinder for the x-axis
translation = transformations.translation_matrix(
[0, 0, axis_length / 2])
rotation = transformations.rotation_matrix(np.radians(90),
[0, 1, 0])
x_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, X is red
x_axis.visual.face_colors = [255, 0, 0]
# append the sphere and three cylinders
marker = util.concatenate([axis_origin,
x_axis,
y_axis,
z_axis])
return marker |
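A short usage sketch for the marker builder above; exposing it as trimesh.creation.axis is an assumption about packaging (consistent with the trimesh helpers the function calls), not something stated in the entry:

import trimesh

# 4 cm origin ball, shifted one unit along +X via a homogeneous transform
transform = trimesh.transformations.translation_matrix([1.0, 0.0, 0.0])
marker = trimesh.creation.axis(origin_size=0.04, transform=transform)

assert isinstance(marker, trimesh.Trimesh)
print(marker.vertices.shape)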
python | def get_contact_from_id(self, contact_id):
"""
Fetches a contact given its ID
:param contact_id: Contact ID
:type contact_id: str
:return: Contact or Error
:rtype: Contact
"""
contact = self.wapi_functions.getContact(contact_id)
if contact is None:
raise ContactNotFoundError("Contact {0} not found".format(contact_id))
return Contact(contact, self) |
python | def get_last_metrics(self):
"""Read all measurement events since last call of the method.
:return List[ScalarMetricLogEntry]
"""
read_up_to = self._logged_metrics.qsize()
messages = []
for i in range(read_up_to):
try:
messages.append(self._logged_metrics.get_nowait())
except Empty:
pass
return messages |
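The method above drains a thread-safe queue without blocking by snapshotting qsize() first. A self-contained sketch of the same pattern using only the standard library:

from queue import Queue, Empty

logged_metrics = Queue()
for value in (1, 2, 3):
    logged_metrics.put(value)

read_up_to = logged_metrics.qsize()  # snapshot; items arriving later wait for the next call
messages = []
for _ in range(read_up_to):
    try:
        messages.append(logged_metrics.get_nowait())
    except Empty:
        break  # another consumer emptied the queue first

print(messages)  # [1, 2, 3]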
java | public final void setIsSyntheticBlock(boolean val) {
checkState(token == Token.BLOCK);
putBooleanProp(Prop.SYNTHETIC, val);
} |
python | def launch_protect(job, patient_data, univ_options, tool_options):
"""
The launchpad for ProTECT. The DAG for ProTECT can be viewed in Flowchart.txt.
:param dict patient_data: Dict of information regarding the input sequences for the patient
:param dict univ_options: Dict of universal options used by almost all tools
:param dict tool_options: Options for the various tools
"""
    # Add patient id to univ_options as it is passed to every major node in the DAG and can be
    # used as a prefix for the logfile.
univ_options['patient'] = patient_data['patient_id']
univ_options['tumor_type'] = patient_data['tumor_type']
# Ascertain number of cpus to use per job
for tool in tool_options:
tool_options[tool]['n'] = ascertain_cpu_share(univ_options['max_cores'])
# Define the various nodes in the DAG
# Need a logfile and a way to send it around
sample_prep = job.wrapJobFn(prepare_samples, patient_data, univ_options, disk='40G')
job.addChild(sample_prep)
# Define the fastq deletion step
fastq_deletion_1 = job.wrapJobFn(delete_fastqs, sample_prep.rv(), disk='100M', memory='100M')
sample_prep.addChild(fastq_deletion_1)
# Get all the input files
haplotype_patient = get_mutations = None
fastq_files = defaultdict(lambda: None)
bam_files = defaultdict(lambda: None)
delete_bam_files = defaultdict(lambda: None)
phlat_files = defaultdict(lambda: None)
for sample_type in 'tumor_dna', 'normal_dna', 'tumor_rna':
if sample_type + '_fastq_1' in patient_data:
fastq_files[sample_type] = job.wrapJobFn(get_patient_fastqs, sample_prep.rv(),
sample_type, disk='10M')
sample_prep.addChild(fastq_files[sample_type])
fastq_files[sample_type].addChild(fastq_deletion_1)
elif sample_type + '_bam' in patient_data:
bam_files[sample_type] = job.wrapJobFn(get_patient_bams, sample_prep.rv(), sample_type,
univ_options, tool_options['bwa'],
tool_options['mutect'],
disk='10M').encapsulate()
sample_prep.addChild(bam_files[sample_type])
# define the haplotyping subgraph of the DAG
if 'hla_haplotype_files' in patient_data:
haplotype_patient = job.wrapJobFn(get_patient_mhc_haplotype, sample_prep.rv())
sample_prep.addChild(haplotype_patient)
else:
assert None not in fastq_files.values()
# We are guaranteed to have fastqs here
for sample_type in 'tumor_dna', 'normal_dna', 'tumor_rna':
phlat_files[sample_type] = job.wrapJobFn(
run_phlat, fastq_files[sample_type].rv(), sample_type, univ_options,
tool_options['phlat'], cores=tool_options['phlat']['n'],
disk=PromisedRequirement(phlat_disk, fastq_files[sample_type].rv()))
fastq_files[sample_type].addChild(phlat_files[sample_type])
phlat_files[sample_type].addChild(fastq_deletion_1)
haplotype_patient = job.wrapJobFn(merge_phlat_calls,
phlat_files['tumor_dna'].rv(),
phlat_files['normal_dna'].rv(),
phlat_files['tumor_rna'].rv(),
univ_options, disk='100M', memory='100M', cores=1)
phlat_files['tumor_dna'].addChild(haplotype_patient)
phlat_files['normal_dna'].addChild(haplotype_patient)
phlat_files['tumor_rna'].addChild(haplotype_patient)
# Define the RNA-Seq Alignment subgraph if needed
if bam_files['tumor_rna'] is None:
if ('fusion_bedpe' in patient_data and 'expression_files' in patient_data and
'mutation_vcf' not in patient_data):
# If we are processing only fusions, and we have precomputed expression values, we don't
# need to align the rna
fusions = job.wrapJobFn(get_patient_bedpe, sample_prep.rv())
sample_prep.addChild(fusions)
bam_files['tumor_rna'] = job.wrapJobFn(dummy_job, {}, disk='1M', memory='1M', cores=1)
sample_prep.addChild(bam_files['tumor_rna'])
else:
assert fastq_files['tumor_rna'] is not None
cutadapt = job.wrapJobFn(run_cutadapt, fastq_files['tumor_rna'].rv(), univ_options,
tool_options['cutadapt'], cores=1,
disk=PromisedRequirement(cutadapt_disk,
fastq_files['tumor_rna'].rv()))
bam_files['tumor_rna'] = job.wrapJobFn(align_rna, cutadapt.rv(), univ_options,
tool_options['star'], cores=1,
disk='100M').encapsulate()
fastq_deletion_2 = job.wrapJobFn(delete_fastqs, {'cutadapted_rnas': cutadapt.rv()},
disk='100M', memory='100M')
fastq_files['tumor_rna'].addChild(cutadapt)
cutadapt.addChild(fastq_deletion_1)
cutadapt.addChild(fastq_deletion_2)
cutadapt.addChild(bam_files['tumor_rna'])
bam_files['tumor_rna'].addChild(fastq_deletion_2)
# Define the fusion calling node
if 'fusion_bedpe' not in patient_data:
tool_options['star_fusion']['index'] = tool_options['star']['index']
tool_options['fusion_inspector']['index'] = tool_options['star']['index']
fusions = job.wrapJobFn(wrap_fusion,
cutadapt.rv(),
bam_files['tumor_rna'].rv(),
univ_options,
tool_options['star_fusion'],
tool_options['fusion_inspector'],
disk='100M', memory='100M', cores=1).encapsulate()
else:
fusions = job.wrapJobFn(get_patient_bedpe, sample_prep.rv())
sample_prep.addChild(fusions)
bam_files['tumor_rna'].addChild(fusions)
fusions.addChild(fastq_deletion_1)
fusions.addChild(fastq_deletion_2)
else:
# Define the fusion calling node
if 'fusion_bedpe' in patient_data:
fusions = job.wrapJobFn(get_patient_bedpe, sample_prep.rv())
sample_prep.addChild(fusions)
else:
if tool_options['star_fusion']['run'] is True:
job.fileStore.logToMaster('Input RNA bams were provided for sample %s. Fusion '
'detection can only be run with input '
'fastqs.' % univ_options['patient'])
fusions = None
# Define the Expression estimation node
if 'expression_files' in patient_data:
rsem = job.wrapJobFn(get_patient_expression, sample_prep.rv())
sample_prep.addChild(rsem)
else:
rsem = job.wrapJobFn(wrap_rsem, bam_files['tumor_rna'].rv(), univ_options,
tool_options['rsem'], cores=1, disk='100M').encapsulate()
bam_files['tumor_rna'].addChild(rsem)
# Define the bam deletion node
delete_bam_files['tumor_rna'] = job.wrapJobFn(delete_bams,
bam_files['tumor_rna'].rv(),
univ_options['patient'],
disk='100M',
memory='100M')
bam_files['tumor_rna'].addChild(delete_bam_files['tumor_rna'])
rsem.addChild(delete_bam_files['tumor_rna'])
if fusions:
fusions.addChild(delete_bam_files['tumor_rna'])
# Define the reporting leaves
if phlat_files['tumor_rna'] is not None:
mhc_pathway_assessment = job.wrapJobFn(run_mhc_gene_assessment, rsem.rv(),
phlat_files['tumor_rna'].rv(), univ_options,
tool_options['reports'], disk='100M',
memory='100M', cores=1)
rsem.addChild(mhc_pathway_assessment)
phlat_files['tumor_rna'].addChild(mhc_pathway_assessment)
else:
mhc_pathway_assessment = job.wrapJobFn(run_mhc_gene_assessment, rsem.rv(), None,
univ_options, tool_options['reports'],
disk='100M', memory='100M', cores=1)
rsem.addChild(mhc_pathway_assessment)
itx_resistance_assessment = job.wrapJobFn(run_itx_resistance_assessment, rsem.rv(),
univ_options, tool_options['reports'],
disk='100M', memory='100M', cores=1)
rsem.addChild(itx_resistance_assessment)
car_t_validity_assessment = job.wrapJobFn(run_car_t_validity_assessment, rsem.rv(),
univ_options, tool_options['reports'],
disk='100M', memory='100M', cores=1)
rsem.addChild(car_t_validity_assessment)
# Define the DNA-Seq alignment and mutation calling subgraphs if necessary
if 'mutation_vcf' in patient_data:
get_mutations = job.wrapJobFn(get_patient_vcf, sample_prep.rv())
sample_prep.addChild(get_mutations)
elif 'fusion_bedpe' in patient_data:
# Fusions have been handled above, and we don't need to align DNA
get_mutations = None
else:
assert (None, None) not in zip(fastq_files.values(), bam_files.values())
for sample_type in 'tumor_dna', 'normal_dna':
if bam_files[sample_type] is None:
assert fastq_files[sample_type] is not None
bam_files[sample_type] = job.wrapJobFn(align_dna, fastq_files[sample_type].rv(),
sample_type, univ_options,
tool_options['bwa'], cores=1,
disk='100M').encapsulate()
fastq_files[sample_type].addChild(bam_files[sample_type])
bam_files[sample_type].addChild(fastq_deletion_1)
else:
# We already have the bam ready to go
pass
delete_bam_files[sample_type] = job.wrapJobFn(delete_bams,
bam_files[sample_type].rv(),
univ_options['patient'], disk='100M',
memory='100M')
bam_files[sample_type].addChild(delete_bam_files[sample_type])
# Time to call mutations
mutations = {
'radia': job.wrapJobFn(run_radia, bam_files['tumor_rna'].rv(),
bam_files['tumor_dna'].rv(), bam_files['normal_dna'].rv(),
univ_options, tool_options['radia'],
disk='100M').encapsulate(),
'mutect': job.wrapJobFn(run_mutect, bam_files['tumor_dna'].rv(),
bam_files['normal_dna'].rv(), univ_options,
tool_options['mutect'], disk='100M').encapsulate(),
'muse': job.wrapJobFn(run_muse, bam_files['tumor_dna'].rv(),
bam_files['normal_dna'].rv(), univ_options,
tool_options['muse']).encapsulate(),
'somaticsniper': job.wrapJobFn(run_somaticsniper, bam_files['tumor_dna'].rv(),
bam_files['normal_dna'].rv(), univ_options,
tool_options['somaticsniper']).encapsulate(),
'strelka': job.wrapJobFn(run_strelka, bam_files['tumor_dna'].rv(),
bam_files['normal_dna'].rv(), univ_options,
tool_options['strelka']).encapsulate(),
'indels': job.wrapJobFn(run_indel_caller, bam_files['tumor_dna'].rv(),
bam_files['normal_dna'].rv(), univ_options, 'indel_options',
disk='100M', memory='100M', cores=1)}
for sample_type in 'tumor_dna', 'normal_dna':
for caller in mutations:
bam_files[sample_type].addChild(mutations[caller])
bam_files['tumor_rna'].addChild(mutations['radia'])
get_mutations = job.wrapJobFn(run_mutation_aggregator,
{caller: cjob.rv() for caller, cjob in mutations.items()},
univ_options, disk='100M', memory='100M',
cores=1).encapsulate()
for caller in mutations:
mutations[caller].addChild(get_mutations)
# We don't need the normal dna bam any more
get_mutations.addChild(delete_bam_files['normal_dna'])
# We may need the tumor one depending on OxoG
if not patient_data['filter_for_OxoG']:
get_mutations.addChild(delete_bam_files['tumor_dna'])
if get_mutations is not None:
snpeff = job.wrapJobFn(run_snpeff, get_mutations.rv(), univ_options, tool_options['snpeff'],
disk=PromisedRequirement(snpeff_disk,
tool_options['snpeff']['index']))
get_mutations.addChild(snpeff)
else:
snpeff = None
# The rest of the subgraph should be unchanged
tumor_dna_bam = bam_files['tumor_dna'].rv() if patient_data['filter_for_OxoG'] else None
fusion_calls = fusions.rv() if fusions else None
snpeffed_calls = snpeff.rv() if snpeff else None
transgene = job.wrapJobFn(run_transgene, snpeffed_calls, bam_files['tumor_rna'].rv(),
univ_options, tool_options['transgene'],
disk=PromisedRequirement(transgene_disk, bam_files['tumor_rna'].rv()),
memory='100M', cores=1, tumor_dna_bam=tumor_dna_bam,
fusion_calls=fusion_calls)
if snpeff:
snpeff.addChild(transgene)
bam_files['tumor_rna'].addChild(transgene)
transgene.addChild(delete_bam_files['tumor_rna'])
if patient_data['filter_for_OxoG']:
bam_files['tumor_dna'].addChild(transgene)
transgene.addChild(delete_bam_files['tumor_dna'])
if fusions:
fusions.addChild(transgene)
spawn_mhc = job.wrapJobFn(spawn_antigen_predictors, transgene.rv(), haplotype_patient.rv(),
univ_options, (tool_options['mhci'], tool_options['mhcii']),
disk='100M', memory='100M', cores=1).encapsulate()
haplotype_patient.addChild(spawn_mhc)
transgene.addChild(spawn_mhc)
merge_mhc = job.wrapJobFn(merge_mhc_peptide_calls, spawn_mhc.rv(), transgene.rv(), univ_options,
disk='100M', memory='100M', cores=1)
spawn_mhc.addFollowOn(merge_mhc)
transgene.addChild(merge_mhc)
rankboost = job.wrapJobFn(wrap_rankboost, rsem.rv(), merge_mhc.rv(), transgene.rv(),
univ_options, tool_options['rankboost'], disk='100M', memory='100M',
cores=1)
rsem.addChild(rankboost)
merge_mhc.addChild(rankboost)
transgene.addChild(rankboost)
report_success = job.wrapJobFn(email_report, univ_options)
rankboost.addChild(report_success)
return None |
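The entry above wires dozens of Toil jobs together with wrapJobFn, addChild and addFollowOn, passing promised return values (.rv()) between them. A minimal, self-contained sketch of that wiring pattern; the three steps are placeholder stand-ins, not ProTECT tools:

from toil.common import Toil
from toil.job import Job


def prepare(job, data):
    job.fileStore.logToMaster('preparing %s' % data)
    return data.upper()


def analyse(job, prepared):
    return 'analysed:' + prepared


def report(job, result):
    job.fileStore.logToMaster(result)


def root(job, data):
    prep = job.wrapJobFn(prepare, data)
    job.addChild(prep)
    ana = job.wrapJobFn(analyse, prep.rv())  # consumes the promised return value
    prep.addChild(ana)
    rep = job.wrapJobFn(report, ana.rv())
    ana.addFollowOn(rep)  # runs only after ana and all of its children finish


if __name__ == '__main__':
    options = Job.Runner.getDefaultOptions('./toil_jobstore')
    options.logLevel = 'INFO'
    with Toil(options) as toil:
        toil.start(Job.wrapJobFn(root, 'sample1'))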
java | public java.util.List<Tape> getTapes() {
if (tapes == null) {
tapes = new com.amazonaws.internal.SdkInternalList<Tape>();
}
return tapes;
} |