language | func_code_string
---|---|
python | def do_transition_for(brain_or_object, transition):
"""Performs a workflow transition for the passed in object.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: The object where the transition was performed
"""
if not isinstance(transition, basestring):
fail("Transition type needs to be string, got '%s'" % type(transition))
obj = get_object(brain_or_object)
try:
ploneapi.content.transition(obj, transition)
except ploneapi.exc.InvalidParameterError as e:
fail("Failed to perform transition '{}' on {}: {}".format(
transition, obj, str(e)))
return obj |
python | def _execute(self,
native,
command,
data=None,
returning=True,
mapper=dict):
"""
Executes the inputted command into the current \
connection cursor.
:param command | <str>
data | <dict> || None
autoCommit | <bool> | commit database changes immediately
autoClose | <bool> | closes connections immediately
:return [{<str> key: <variant>, ..}, ..], <int> count
"""
if data is None:
data = {}
cursor = native.cursor(cursor_factory=DictCursor)
# register the hstore option
try:
register_hstore(cursor, unicode=True)
except pg.ProgrammingError:
log.warning('HSTORE is not supported in this version of Postgres!')
# register the json option
try:
register_json(cursor)
except pg.ProgrammingError:
log.warning('JSON is not supported in this version of Postgres!')
start = datetime.datetime.now()
log.debug('***********************')
log.debug(command % data)
log.debug('***********************')
try:
cursor.execute(command, data)
rowcount = cursor.rowcount
# look for a cancelled query
except pg_ext.QueryCanceledError as cancelled:
try:
native.rollback()
except StandardError as err:
log.error('Rollback error: {0}'.format(err))
log.critical(command)
if data:
log.critical(str(data))
# raise more useful errors
if 'statement timeout' in str(cancelled):
raise orb.errors.QueryTimeout(command, (datetime.datetime.now() - start).total_seconds())
else:
raise orb.errors.Interruption()
# look for a disconnection error
except pg.InterfaceError:
raise orb.errors.ConnectionLost()
# look for integrity errors
except (pg.IntegrityError, pg.OperationalError) as err:
try:
native.rollback()
except StandardError:
pass
# look for a duplicate error
duplicate_error = re.search('Key (.*) already exists.', nstr(err))
if duplicate_error:
key = duplicate_error.group(1)
result = re.match('^\(lower\((?P<column>[^\)]+)::text\)\)=\((?P<value>[^\)]+)\)$', key)
if not result:
result = re.match('^(?P<column>\w+)=(?P<value>\w+)', key)
if result:
msg = '{value} is already being used.'.format(**result.groupdict())
raise orb.errors.DuplicateEntryFound(msg)
else:
raise orb.errors.DuplicateEntryFound(duplicate_error.group())
# look for a reference error
reference_error = re.search('Key .* is still referenced from table ".*"', nstr(err))
if reference_error:
msg = 'Cannot remove this record, it is still being referenced.'
raise orb.errors.CannotDelete(msg)
# unknown error
log.debug(traceback.format_exc())
raise orb.errors.QueryFailed(command, data, nstr(err))
# connection has closed underneath the hood
except (pg.Error, pg.ProgrammingError) as err:
try:
native.rollback()
except StandardError:
pass
log.error(traceback.format_exc())
raise orb.errors.QueryFailed(command, data, nstr(err))
try:
results = [mapper(record) for record in cursor.fetchall()]
except pg.ProgrammingError:
results = []
return results, rowcount |
python | def get_health(self, consumers=2, messages=100):
"""
Checks the health of the transport & Redis connections;
returns True if healthy, False otherwise.
"""
data = {'consumers': consumers, 'messages': messages}
try:
self._request('GET', '/health', data=json.dumps(data))
return True
except SensuAPIException:
return False |
java | public void setLength(long newLength) throws IOException {
if (newLength < 0) {
throw new IllegalArgumentException("newLength < 0");
}
try {
Libcore.os.ftruncate(fd, newLength);
} catch (ErrnoException errnoException) {
throw errnoException.rethrowAsIOException();
}
long filePointer = getFilePointer();
if (filePointer > newLength) {
seek(newLength);
}
// if we are in "rws" mode, attempt to sync file+metadata
if (syncMetadata) {
fd.sync();
}
} |
java | private boolean isCausal(KamEdge edge) {
return edge.getRelationshipType() == RelationshipType.INCREASES
|| edge.getRelationshipType() == RelationshipType.DIRECTLY_INCREASES
|| edge.getRelationshipType() == RelationshipType.DECREASES
|| edge.getRelationshipType() == RelationshipType.DIRECTLY_DECREASES;
} |
python | def restore_geometry_on_layout_change(self, value):
"""
Setter for **self.__restore_geometry_on_layout_change** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format(
"restore_geometry_on_layout_change", value)
self.__restore_geometry_on_layout_change = value |
python | def set(self, values):
"""
Set the object parameters using a dictionary
"""
if hasattr(self, "inputs"):
for item in self.inputs:
if hasattr(self, item):
setattr(self, item, values[item]) |
python | def remove(self, x):
"""Removes given arg (or list thereof) from Args object."""
def _remove(x):
found = self.first(x)
if found is not None:
self._args.pop(found)
if _is_collection(x):
for item in x:
_remove(item)
else:
_remove(x) |
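
With the loop bug fixed (`_remove(item)` instead of `_remove(x)`), the single-item-or-collection dispatch is easier to see in a self-contained sketch on a plain list; `remove_all` and the sample values are hypothetical, not part of the original Args API:

```python
# Standalone sketch of Args.remove()'s "single item or collection" dispatch,
# using a plain list; remove_all and the sample values are hypothetical.
def remove_all(items, x):
    def _remove(value):
        if value in items:          # mirrors self.first(x) finding a match
            items.remove(value)
    if isinstance(x, (list, tuple, set)):
        for item in x:
            _remove(item)           # note: item, not x
    else:
        _remove(x)

args = ["-v", "--out", "file.txt"]
remove_all(args, ["-v", "--out"])
assert args == ["file.txt"]
```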
python | def cnvlTc(idxPrc,
aryPrfTcChunk,
lstHrf,
varTr,
varNumVol,
queOut,
varOvsmpl=10,
varHrfLen=32,
):
"""
Convolution of time courses with HRF model.
"""
# *** prepare hrf time courses for convolution
print("---------Process " + str(idxPrc) +
": Prepare hrf time courses for convolution")
# get frame times, i.e. start point of every volume in seconds
vecFrms = np.arange(0, varTr * varNumVol, varTr)
# get supersampled frames times, i.e. start point of every volume in
# seconds, since convolution takes place in temp. upsampled space
vecFrmTms = np.arange(0, varTr * varNumVol, varTr / varOvsmpl)
# get resolution of supersampled frame times
varRes = varTr / float(varOvsmpl)
# prepare empty list that will contain the arrays with hrf time courses
lstBse = []
for hrfFn in lstHrf:
# needs to be a multiple of oversample
vecTmpBse = hrfFn(np.linspace(0, varHrfLen,
(varHrfLen // varTr) * varOvsmpl))
lstBse.append(vecTmpBse)
# *** prepare pixel time courses for convolution
print("---------Process " + str(idxPrc) +
": Prepare pixel time courses for convolution")
# adjust the input, if necessary, such that input is 2D, with last dim time
tplInpShp = aryPrfTcChunk.shape
aryPrfTcChunk = aryPrfTcChunk.reshape((-1, aryPrfTcChunk.shape[-1]))
# Prepare an empty array for output
aryConv = np.zeros((aryPrfTcChunk.shape[0], len(lstHrf),
aryPrfTcChunk.shape[1]))
print("---------Process " + str(idxPrc) +
": Convolve")
# Each time course is convolved with the HRF separately, because the
# numpy convolution function can only be used on one-dimensional data.
# Thus, we have to loop through time courses:
for idxTc in range(0, aryConv.shape[0]):
# Extract the current time course:
vecTc = aryPrfTcChunk[idxTc, :]
# upsample the pixel time course so that it matches the hrf time course
vecTcUps = np.zeros(int(varNumVol * varTr/varRes))
vecOns = vecFrms[vecTc.astype(bool)]
vecInd = np.round(vecOns / varRes).astype(np.int)
vecTcUps[vecInd] = 1.
# *** convolve
for indBase, base in enumerate(lstBse):
# perform the convolution
col = np.convolve(base, vecTcUps, mode='full')[:vecTcUps.size]
# get function for downsampling
f = interp1d(vecFrmTms, col)
# downsample to original space and assign to ary
aryConv[idxTc, indBase, :] = f(vecFrms)
# determine output shape
tplOutShp = tplInpShp[:-1] + (len(lstHrf), ) + (tplInpShp[-1], )
# Create list containing the convolved timecourses, and the process ID:
lstOut = [idxPrc,
aryConv.reshape(tplOutShp)]
# Put output to queue:
queOut.put(lstOut) |
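
The core of the loop above is an upsample -> convolve -> downsample pipeline. Here is a minimal sketch of that pattern, assuming numpy and scipy are available; the boxcar "HRF" is a stand-in for the real HRF functions in lstHrf:

```python
# Minimal sketch of the upsample -> convolve -> downsample pattern used in
# cnvlTc(); the boxcar "HRF" below is a toy stand-in for a real HRF model.
import numpy as np
from scipy.interpolate import interp1d

tr, n_vol, ovsmpl = 2.0, 10, 10
frames = np.arange(0, tr * n_vol, tr)               # volume onsets (s)
frames_up = np.arange(0, tr * n_vol, tr / ovsmpl)   # supersampled onsets
tc = np.array([0, 1, 0, 0, 1, 0, 0, 0, 0, 0])       # binary stimulus course

# upsample: place unit impulses at stimulus onsets in the fine grid
tc_up = np.zeros(frames_up.size)
tc_up[np.round(frames[tc.astype(bool)] / (tr / ovsmpl)).astype(int)] = 1.0

hrf = np.ones(ovsmpl)                               # toy boxcar response
conv = np.convolve(hrf, tc_up, mode='full')[:tc_up.size]
downsampled = interp1d(frames_up, conv)(frames)     # back to the volume grid
print(downsampled)
```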
java | protected Object traceChainedPropertyValue(String chainedName) {
final Object failureValue = getTypeFailureMap().get(chainedName);
if (failureValue != null) {
return failureValue;
}
final String firstName = Srl.substringFirstFront(chainedName, ".");
final String nestedChain = Srl.substringFirstRear(chainedName, ".");
// trace using only defined types:
// property instances are created as (almost) their defined types by the framework,
// so the definition of a nested property can still be checked
final ActionFormProperty property = findProperty(firstName);
final List<String> nestedList = Srl.splitList(nestedChain, ".");
Object currentObj = getPropertyValue(property);
Class<?> currentType = property.getPropertyDesc().getPropertyType();
Integer arrayIndex = extractArrayIndexIfExists(firstName);
for (String nested : nestedList) {
// don't quit if the value is null, so the definition of the nested property can still be checked
if (List.class.isAssignableFrom(currentType)) { // sea[0].dockside[1].waves
currentObj = currentObj != null ? ((List<?>) currentObj).get(arrayIndex) : null;
if (currentObj != null) {
currentType = currentObj.getClass();
} else {
break; // cannot get type so cannot continue
}
}
if (Map.class.isAssignableFrom(currentType)) {
currentObj = currentObj != null ? ((Map<?, ?>) currentObj).get(nested) : null;
if (currentObj != null) {
currentType = currentObj.getClass();
} else {
break; // cannot get type so cannot continue
}
} else {
final BeanDesc beanDesc = BeanDescFactory.getBeanDesc(currentType);
final PropertyDesc pd;
try {
pd = beanDesc.getPropertyDesc(nested); // check here
} catch (BeanPropertyNotFoundException e) {
throwNestedFormPropertyNotFoundException(chainedName, nested, e);
return null; // unreachable
}
if (currentObj != null) {
if (currentObj instanceof BeanWrapper) {
currentObj = ((BeanWrapper) currentObj).get(nested);
} else {
currentObj = pd.getValue(currentObj);
}
}
currentType = pd.getPropertyType();
}
arrayIndex = extractArrayIndexIfExists(nested);
}
return currentObj;
} |
java | protected FlushStrategy elementAsFlushStrategy(XMLStreamReader reader, Map<String, String> expressions)
throws XMLStreamException, ParserException
{
String elementtext = rawElementText(reader);
if (expressions != null && elementtext != null && elementtext.indexOf("${") != -1)
expressions.put(CommonXML.ELEMENT_FLUSH_STRATEGY, elementtext);
FlushStrategy result = FlushStrategy.forName(getSubstitutionValue(elementtext));
if (result != FlushStrategy.UNKNOWN)
return result;
throw new ParserException(bundle.notValidFlushStrategy(elementtext));
} |
python | def police_priority_map_exceed_map_pri7_exceed(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
police_priority_map = ET.SubElement(config, "police-priority-map", xmlns="urn:brocade.com:mgmt:brocade-policer")
name_key = ET.SubElement(police_priority_map, "name")
name_key.text = kwargs.pop('name')
exceed = ET.SubElement(police_priority_map, "exceed")
map_pri7_exceed = ET.SubElement(exceed, "map-pri7-exceed")
map_pri7_exceed.text = kwargs.pop('map_pri7_exceed')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
python | def set_op_upstreams(op_run, op):
"""Set the upstream operations for operation run."""
# Get the IDs of all upstream ops of the current op
upstream_ops = op.upstream_operations.values_list('id', flat=True)
# We get latest op runs for the upstream_ops
latest_op_runs = OperationRun.objects.filter(id__in=upstream_ops)
latest_op_runs = latest_op_runs.annotate(max_date=Max('created_at'))
latest_op_runs = latest_op_runs.filter(created_at=F('max_date'))
# Set the upstream ops
op_run.set(latest_op_runs) |
java | public double[][] toDenseArray() {
double[][] arr = new double[rows][backingMatrix.columns()];
for (int i = 0; i < rowToReal.length; ++i)
arr[i] = backingMatrix.getRow(rowToReal[i]);
return arr;
} |
java | @Override
public void initialize() throws RepositoryException{
initialized = true;
try {
queryEngine = configuration.loadQueryEngine();
queryEngine.connect();
}
catch (Exception e){
throw new RepositoryException(e);
}
} |
java | public static SerializedRecord anonymize(SerializedRecord sr)
throws Exception {
String hostname = sr.get("hostname");
if (hostname == null)
throw new Exception("Malformed SerializedRecord: no hostname found");
if ("true".equalsIgnoreCase(Environment
.getProperty("anonymizer.hash.hostnames"))) {
// hash the node's hostname
anonymizeField(sr, "message", hostname, "_hn_");
anonymizeField(sr, "hostname", hostname, "_hn_");
// hash all other hostnames
String suffix = Environment.getProperty("anonymizer.hostname.suffix");
if (suffix != null)
anonymizeField(sr, "message", "(\\S+\\.)*" + suffix, "_hn_");
}
if ("true".equalsIgnoreCase(Environment.getProperty("anonymizer.hash.ips"))) {
// hash all ip addresses
String ipPattern = "(\\d{1,3}\\.){3}\\d{1,3}";
anonymizeField(sr, "message", ipPattern, "_ip_");
anonymizeField(sr, "ips", ipPattern, "_ip_");
// if multiple ips are present for a node:
int i = 0;
while (sr.get("ips" + "#" + i) != null)
anonymizeField(sr, "ips" + "#" + i++, ipPattern, "_ip_");
if ("NIC".equalsIgnoreCase(sr.get("type")))
anonymizeField(sr, "ipAddress", ipPattern, "_ip_");
}
if ("true".equalsIgnoreCase(Environment
.getProperty("anonymizer.hash.filenames"))) {
// hash every filename present in messages
anonymizeField(sr, "message", "\\s+/(\\S+/)*[^:\\s]*", " _fn_");
anonymizeField(sr, "message", "\\s+hdfs://(\\S+/)*[^:\\s]*",
" hdfs://_fn_");
}
return sr;
} |
python | def get_base_logfilename(logname):
""" Return filename for a logfile, filename will contain the actual path +
filename
:param logname: Name of the log including the extension, should describe
what it contains (eg. "device_serial_port.log")
"""
logdir = get_base_dir()
fname = os.path.join(logdir, logname)
GLOBAL_LOGFILES.append(fname)
return fname |
java | public void reinitialize( HttpServletRequest request, HttpServletResponse response, ServletContext servletContext )
{
super.reinitialize( request, response, servletContext );
if ( _pageInputs == null )
{
Map map = InternalUtils.getActionOutputMap( request, false );
if ( map != null ) _pageInputs = Collections.unmodifiableMap( map );
}
//
// Initialize the page flow field.
//
Field pageFlowMemberField = getCachedInfo().getPageFlowMemberField();
// TODO: should we add a compiler warning if this field isn't transient? All this reinitialization logic is
// for the transient case.
if ( fieldIsUninitialized( pageFlowMemberField ) )
{
PageFlowController pfc = PageFlowUtils.getCurrentPageFlow( request, servletContext );
initializeField( pageFlowMemberField, pfc );
}
//
// Initialize the shared flow fields.
//
CachedSharedFlowRefInfo.SharedFlowFieldInfo[] sharedFlowMemberFields =
getCachedInfo().getSharedFlowMemberFields();
if ( sharedFlowMemberFields != null )
{
for ( int i = 0; i < sharedFlowMemberFields.length; i++ )
{
CachedSharedFlowRefInfo.SharedFlowFieldInfo fi = sharedFlowMemberFields[i];
Field field = fi.field;
if ( fieldIsUninitialized( field ) )
{
Map/*< String, SharedFlowController >*/ sharedFlows = PageFlowUtils.getSharedFlows( request );
String name = fi.sharedFlowName;
SharedFlowController sf =
name != null ? ( SharedFlowController ) sharedFlows.get( name ) : PageFlowUtils.getGlobalApp( request );
if ( sf != null )
{
initializeField( field, sf );
}
else
{
_log.error( "Could not find shared flow with name \"" + fi.sharedFlowName
+ "\" to initialize field " + field.getName() + " in " + getClass().getName() );
}
}
}
}
} |
java | public static void touch(final File folder , final String fileName) throws IOException {
if(!folder.exists()){
folder.mkdirs();
}
final File touchedFile = new File(folder, fileName);
// The JVM will only 'touch' the file if you instantiate a
// FileOutputStream instance for the file in question.
// You don't actually write any data to the file through
// the FileOutputStream. Just instantiate it and close it.
try (
FileOutputStream doneFOS = new FileOutputStream(touchedFile);
) {
// Touching the file
}
catch (FileNotFoundException e) {
throw new FileNotFoundException("Failed to find the file: " + e);
}
} |
python | def _get_groups(self, data):
""" Get all groups defined """
groups = []
for attribute in SOURCE_KEYS:
for k, v in data[attribute].items():
if k is None:
k = 'Sources'
if k not in groups:
groups.append(k)
for k, v in data['include_files'].items():
if k is None:
k = 'Includes'
if k not in groups:
groups.append(k)
return groups |
java | private boolean _canSkipWhileScanning(MetricSchemaRecordQuery query, RecordType type) {
if( (RecordType.METRIC.equals(type) || RecordType.SCOPE.equals(type))
&& !SchemaService.containsFilter(query.getTagKey())
&& !SchemaService.containsFilter(query.getTagValue())
&& !SchemaService.containsFilter(query.getNamespace())) {
if(RecordType.METRIC.equals(type) && !SchemaService.containsFilter(query.getMetric())) {
return false;
}
if(RecordType.SCOPE.equals(type) && !SchemaService.containsFilter(query.getScope())) {
return false;
}
return true;
}
return false;
} |
python | def show_spindle_dialog(self):
"""Create the spindle detection dialog."""
self.spindle_dialog.update_groups()
self.spindle_dialog.update_cycles()
self.spindle_dialog.show() |
java | public static String getLoginTarget(CmsObject currentCms, CmsWorkplaceSettings settings, String requestedResource)
throws CmsException {
String directEditPath = CmsLoginHelper.getDirectEditPath(currentCms, settings.getUserSettings(), false);
String target = "";
boolean checkRole = false;
String fragment = UI.getCurrent() != null ? UI.getCurrent().getPage().getUriFragment() : "";
boolean workplace2 = false;
if ((requestedResource == null) && (directEditPath != null)) {
target = directEditPath;
} else if ((requestedResource != null) && !CmsWorkplace.JSP_WORKPLACE_URI.equals(requestedResource)) {
target = requestedResource;
} else {
workplace2 = true;
target = CmsVaadinUtils.getWorkplaceLink();
checkRole = true;
}
UserAgreementHelper userAgreementHelper = new UserAgreementHelper(currentCms, settings);
boolean showUserAgreement = userAgreementHelper.isShowUserAgreement();
if (showUserAgreement) {
target = userAgreementHelper.getConfigurationVfsPath()
+ "?"
+ CmsLoginUserAgreement.PARAM_WPRES
+ "="
+ target;
}
if (checkRole && !OpenCms.getRoleManager().hasRole(currentCms, CmsRole.WORKPLACE_USER)) {
workplace2 = false;
target = CmsLoginHelper.getDirectEditPath(currentCms, settings.getUserSettings(), true);
if (target == null) {
throw new CmsCustomLoginException(
org.opencms.workplace.Messages.get().container(
org.opencms.workplace.Messages.GUI_LOGIN_FAILED_NO_WORKPLACE_PERMISSIONS_0));
}
}
if (!workplace2) {
target = OpenCms.getLinkManager().substituteLink(currentCms, target);
}
if (workplace2 && CmsStringUtil.isEmptyOrWhitespaceOnly(fragment)) {
if (settings.getUserSettings().getStartView().startsWith("/")) {
if (CmsWorkplace.VIEW_WORKPLACE.equals(settings.getUserSettings().getStartView())) {
fragment = CmsFileExplorerConfiguration.APP_ID;
} else if (CmsWorkplace.VIEW_ADMIN.equals(settings.getUserSettings().getStartView())) {
fragment = CmsAppHierarchyConfiguration.APP_ID;
}
} else {
fragment = settings.getUserSettings().getStartView();
}
}
if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(fragment)) {
target += "#" + fragment;
}
return target;
} |
python | def gcstats():
"""Count the number of instances of each type/class
:returns: A dict() mapping type (as a string) to an integer number of references
"""
all = gc.get_objects()
_stats = {}
for obj in all:
K = type(obj)
if K is StatsDelta:
continue # avoid counting ourselves
elif K is InstanceType: # instance of an old-style class
K = getattr(obj, '__class__', K)
# Track types as strings to avoid holding references
K = str(K)
try:
_stats[K] += 1
except KeyError:
_stats[K] = 1
# explicitly break the reference loop between the list and this frame,
# which is contained in the list
# This would otherwise prevent the list from being free'd
del all
return _stats |
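
The same census idea in a self-contained Python 3 form (gcstats() above is Python 2 era code, relying on InstanceType and a StatsDelta class from its own module, both assumptions here):

```python
# Self-contained sketch: count live objects per type via the garbage
# collector, the same idea gcstats() implements for Python 2.
import gc

def type_counts():
    counts = {}
    for obj in gc.get_objects():
        name = type(obj).__name__   # track names, not types, to avoid refs
        counts[name] = counts.get(name, 0) + 1
    return counts

snapshot = type_counts()
print(sorted(snapshot.items(), key=lambda kv: -kv[1])[:5])  # top 5 types
```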
python | def update(self, **kwargs):
"""Due to a password decryption bug
we will disable update() method for 12.1.0 and up
"""
tmos_version = self._meta_data['bigip'].tmos_version
if LooseVersion(tmos_version) > LooseVersion('12.0.0'):
msg = "Update() is unsupported for User on version %s. " \
"Utilize Modify() method instead" % tmos_version
raise UnsupportedOperation(msg)
else:
self._update(**kwargs) |
python | def speak_phrase(self, text, language, format_audio=None, option=None):
"""
This method is very similar to the above, the difference between
them is that this method creates an object of class
TranslateSpeak(having therefore different attributes) and use
another url, as we see the presence of SpeakMode enumerator instead
of Translate.
The parameter ::language:: is the same as the previous
method(the parameter ::lang_to::). To see all possible languages go
to the home page of the documentation that library.
The parameter ::format_audio:: can be of two types: "audio/mp3" or
"audio/wav". If we do not define, Microsoft api will insert by
default the "audio/wav". It is important to be aware that, to
properly name the file downloaded by AudioSpeaked
class(which uses theclassmethod download).
The parameter ::option:: is responsible for setting the audio quality.
It can be of two types: "MaxQuality" or "MinQuality". By default, if
not define, it will be "MinQuality".
"""
infos_speak_translate = SpeakModel(
text, language, format_audio, option).to_dict()
mode_translate = TranslatorMode.SpeakMode.value
return self._get_content(infos_speak_translate, mode_translate) |
java | public Object getTag() {
Object result = null;
if (view != null) {
result = view.getTag();
}
return result;
} |
python | def hideFromPublicBundle(self, otpk_pub):
"""
Hide a one-time pre key from the public bundle.
:param otpk_pub: The public key of the one-time pre key to hide, encoded as a
bytes-like object.
"""
self.__checkSPKTimestamp()
for otpk in self.__otpks:
if otpk.pub == otpk_pub:
self.__otpks.remove(otpk)
self.__hidden_otpks.append(otpk)
self.__refillOTPKs()
break
python | def disableHook(self, msgObj):
"""
Disable yank-pop.
The ``enableHook`` method (see below) connects this method
to the ``qtesigKeyseqComplete`` signal to catch
consecutive calls to this ``yank-pop`` macro. Once the user
issues a key sequence for any other macro but this one, the
kill-list index will be set to a negative index, effectively
disabling the macro.
"""
# Unpack the data structure.
macroName, keysequence = msgObj.data
if macroName != self.qteMacroName():
self.qteMain.qtesigKeyseqComplete.disconnect(
self.disableHook)
self.killListIdx = -1 |
python | def _init_mask_psf(self):
"""
smaller frame that encolses all the idex_mask
:param idex_mask:
:param nx:
:param ny:
:return:
"""
if not hasattr(self, '_x_min_psf'):
idex_2d = self._idex_mask_2d
self._x_min_psf = np.min(np.where(idex_2d == 1)[0])
self._x_max_psf = np.max(np.where(idex_2d == 1)[0])
self._y_min_psf = np.min(np.where(idex_2d == 1)[1])
self._y_max_psf = np.max(np.where(idex_2d == 1)[1]) |
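
The cached bounds amount to a bounding box over the nonzero mask pixels; a minimal numpy sketch of the same computation on a toy mask:

```python
# Bounding box of the nonzero pixels of a 2D mask, as in _init_mask_psf().
import numpy as np

mask = np.zeros((6, 6), dtype=int)
mask[2:4, 1:5] = 1
rows, cols = np.where(mask == 1)
print(rows.min(), rows.max(), cols.min(), cols.max())  # 2 3 1 4
```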
java | private static boolean setParamFromString(String name, String value) throws ConfigurationException {
try {
Field field = config.getClass().getDeclaredField(name);
String fieldType = field.getType().toString();
if (fieldType.compareToIgnoreCase("int") == 0) {
field.set(config, Integer.parseInt(value));
} else if (fieldType.compareToIgnoreCase("boolean") == 0) {
field.set(config, Boolean.parseBoolean(value));
} else if (fieldType.endsWith("List")) {
setCollectionParam(name, Arrays.asList(value.split(",")));
} else {
field.set(config, value);
}
return true;
} catch (SecurityException | NoSuchFieldException | IllegalAccessException e) {
return false;
} catch (IllegalArgumentException e) {
throw new ConfigurationException("Couldn't parse parameter: " + value);
}
} |
python | def add_traits(self, **traits):
"""Dynamically add trait attributes to the Widget."""
super(Widget, self).add_traits(**traits)
for name, trait in traits.items():
if trait.get_metadata('sync'):
self.keys.append(name)
self.send_state(name) |
java | @Override
public void notifyMessageReceived(SQSMessage message) throws JMSException {
SQSMessageIdentifier messageIdentifier = SQSMessageIdentifier.fromSQSMessage(message);
unAckMessages.put(message.getReceiptHandle(), messageIdentifier);
} |
java | public int getExternalFieldValue(final String externalFieldName, final JBBPCompiledBlock compiledBlock, final JBBPIntegerValueEvaluator evaluator) {
final String normalizedName = JBBPUtils.normalizeFieldNameOrPath(externalFieldName);
if (this.externalValueProvider == null) {
throw new JBBPEvalException("Request for '" + externalFieldName + "' but there is not any value provider", evaluator);
} else {
return this.externalValueProvider.provideArraySize(normalizedName, this, compiledBlock);
}
} |
java | @Override
public InputStream getResourceAsStream(String name) {
byte[] b = null;
try {
Asset resource = AssetCache.getAsset(mdwPackage.getName() + "/" + name);
if (resource != null)
b = resource.getRawContent();
if (b == null)
b = findInJarAssets(name);
if (b == null)
b = findInFileSystem(name);
}
catch (Exception ex) {
logger.severeException(ex.getMessage(), ex);
}
if (b == null)
return super.getResourceAsStream(name);
else
return new ByteArrayInputStream(b);
} |
python | def GetAllUserSummaries():
"""Returns a string containing summary info for all GRR users."""
grr_api = maintenance_utils.InitGRRRootAPI()
user_wrappers = sorted(grr_api.ListGrrUsers(), key=lambda x: x.username)
summaries = [_Summarize(w.data) for w in user_wrappers]
return "\n\n".join(summaries) |
java | public JType generate(JCodeModel codeModel, String className, String packageName, URL schemaUrl) {
JPackage jpackage = codeModel._package(packageName);
ObjectNode schemaNode = readSchema(schemaUrl);
return ruleFactory.getSchemaRule().apply(className, schemaNode, null, jpackage, new Schema(null, schemaNode, null));
} |
java | public static MultiLineString removeDuplicateCoordinates(MultiLineString multiLineString, double tolerance) throws SQLException {
ArrayList<LineString> lines = new ArrayList<LineString>();
for (int i = 0; i < multiLineString.getNumGeometries(); i++) {
LineString line = (LineString) multiLineString.getGeometryN(i);
lines.add(removeDuplicateCoordinates(line, tolerance));
}
return FACTORY.createMultiLineString(GeometryFactory.toLineStringArray(lines));
} |
java | @BetaApi
public final Policy getIamPolicyImage(ProjectGlobalImageResourceName resource) {
GetIamPolicyImageHttpRequest request =
GetIamPolicyImageHttpRequest.newBuilder()
.setResource(resource == null ? null : resource.toString())
.build();
return getIamPolicyImage(request);
} |
python | def run_job():
"""Takes an async object and executes its job."""
async = get_current_async()
async_options = async.get_options()
job = async_options.get('job')
if not job:
raise Exception('This async contains no job to execute!')
__, args, kwargs = job
if args is None:
args = ()
if kwargs is None:
kwargs = {}
function = async._decorate_job()
try:
async.executing = True
async.result = AsyncResult(payload=function(*args, **kwargs),
status=AsyncResult.SUCCESS)
except Abort as abort:
logging.info('Async job was aborted: %r', abort)
async.result = AsyncResult(status=AsyncResult.ABORT)
# QUESTION: In this eventuality, we should probably tell the context we
# are "complete" and let it handle completion checking.
_handle_context_completion_check(async)
return
except AbortAndRestart as restart:
logging.info('Async job was aborted and restarted: %r', restart)
raise
except BaseException as e:
async.result = AsyncResult(payload=encode_exception(e),
status=AsyncResult.ERROR)
_handle_results(async_options)
_handle_context_completion_check(async) |
java | public static tunnelip6_stats get(nitro_service service, String tunnelip6) throws Exception{
tunnelip6_stats obj = new tunnelip6_stats();
obj.set_tunnelip6(tunnelip6);
tunnelip6_stats response = (tunnelip6_stats) obj.stat_resource(service);
return response;
} |
java | public alluxio.grpc.WorkerNetAddressOrBuilder getWorkerAddressOrBuilder() {
return workerAddress_ == null ? alluxio.grpc.WorkerNetAddress.getDefaultInstance() : workerAddress_;
} |
java | public void addListeners()
{
String strMessage = "Create new user account";
String strTerms = this.getProperty("terms"); // Terms resource EY
if (strTerms == null)
strTerms = "terms";
if (this.getTask() != null)
if (this.getTask().getApplication() != null)
{
BaseApplication application = (BaseApplication)this.getTask().getApplication();
strMessage = application.getResources(ResourceConstants.ERROR_RESOURCE, true).getString(strMessage);
strTerms = application.getResources(ResourceConstants.DEFAULT_RESOURCE, true).getString(strTerms);
}
this.getScreenRecord().getField(UserScreenRecord.STATUS_LINE).setString(strMessage);
this.getScreenRecord().getField(UserScreenRecord.TERMS).setString(strTerms);
//x this.readCurrentUser();
super.addListeners();
FieldListener listener = this.getMainRecord().getField(UserInfo.USER_NAME).getListener(MainFieldHandler.class);
if (listener != null)
this.getMainRecord().getField(UserInfo.USER_NAME).removeListener(listener, true); // Don't read current accounts
this.getMainRecord().addListener(new UserPasswordHandler(false));
this.addAutoLoginHandler();
} |
python | def read_from_list_with_ids(self, lines):
"""
Read text fragments from a given list of tuples::
[(id_1, text_1), (id_2, text_2), ..., (id_n, text_n)].
:param list lines: the list of ``[id, text]`` fragments (see above)
"""
self.log(u"Reading text fragments from list with ids")
self._create_text_fragments([(line[0], [line[1]]) for line in lines]) |
python | def receiver_directory(self):
"""Parent directory of the downloads directory"""
if self._receiver_directory is None:
self._receiver_directory = self.downloads_directory.parent
return self._receiver_directory |
python | def main(api_endpoint, credentials,
device_model_id, device_id, lang, verbose,
input_audio_file, output_audio_file,
block_size, grpc_deadline, *args, **kwargs):
"""File based sample for the Google Assistant API.
Examples:
$ python -m audiofileinput -i <input file> -o <output file>
"""
# Setup logging.
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
# Load OAuth 2.0 credentials.
try:
with open(credentials, 'r') as f:
credentials = google.oauth2.credentials.Credentials(token=None,
**json.load(f))
http_request = google.auth.transport.requests.Request()
credentials.refresh(http_request)
except Exception as e:
logging.error('Error loading credentials: %s', e)
logging.error('Run google-oauthlib-tool to initialize '
'new OAuth 2.0 credentials.')
sys.exit(-1)
# Create an authorized gRPC channel.
grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
credentials, http_request, api_endpoint)
logging.info('Connecting to %s', api_endpoint)
# Create gRPC stubs
assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(grpc_channel)
# Generate gRPC requests.
def gen_assist_requests(input_stream):
dialog_state_in = embedded_assistant_pb2.DialogStateIn(
language_code=lang,
conversation_state=b''
)
config = embedded_assistant_pb2.AssistConfig(
audio_in_config=embedded_assistant_pb2.AudioInConfig(
encoding='LINEAR16',
sample_rate_hertz=16000,
),
audio_out_config=embedded_assistant_pb2.AudioOutConfig(
encoding='LINEAR16',
sample_rate_hertz=16000,
volume_percentage=100,
),
dialog_state_in=dialog_state_in,
device_config=embedded_assistant_pb2.DeviceConfig(
device_id=device_id,
device_model_id=device_model_id,
)
)
# Send first AssistRequest message with configuration.
yield embedded_assistant_pb2.AssistRequest(config=config)
while True:
# Read user request from file.
data = input_stream.read(block_size)
if not data:
break
# Send subsequent AssistRequest messages with audio chunks.
yield embedded_assistant_pb2.AssistRequest(audio_in=data)
for resp in assistant.Assist(gen_assist_requests(input_audio_file),
grpc_deadline):
# Iterate on AssistResponse messages.
if resp.event_type == END_OF_UTTERANCE:
logging.info('End of audio request detected')
if resp.speech_results:
logging.info('Transcript of user request: "%s".',
' '.join(r.transcript
for r in resp.speech_results))
if len(resp.audio_out.audio_data) > 0:
# Write assistant response to supplied file.
output_audio_file.write(resp.audio_out.audio_data)
if resp.dialog_state_out.supplemental_display_text:
logging.info('Assistant display text: "%s"',
resp.dialog_state_out.supplemental_display_text)
if resp.device_action.device_request_json:
device_request = json.loads(resp.device_action.device_request_json)
logging.info('Device request: %s', device_request) |
java | public static Date parse(String dateString)
throws ParseException {
// Return null if no date provided
if (dateString == null || dateString.isEmpty())
return null;
// Parse date according to format
DateFormat dateFormat = new SimpleDateFormat(DateField.FORMAT);
return dateFormat.parse(dateString);
} |
python | def stream(self, transaction=None):
"""Read the documents in this collection.
This sends a ``RunQuery`` RPC and then returns an iterator which
consumes each document returned in the stream of ``RunQueryResponse``
messages.
.. note::
The underlying stream of responses will time out after
the ``max_rpc_timeout_millis`` value set in the GAPIC
client configuration for the ``RunQuery`` API. Snapshots
not consumed from the iterator before that point will be lost.
If a ``transaction`` is used and it already has write operations
added, this method cannot be used (i.e. read-after-write is not
allowed).
Args:
transaction (Optional[~.firestore_v1beta1.transaction.\
Transaction]): An existing transaction that the query will
run in.
Yields:
~.firestore_v1beta1.document.DocumentSnapshot: The next
document that fulfills the query.
"""
query = query_mod.Query(self)
return query.stream(transaction=transaction) |
python | def _EccZmaxRperiRap(self,*args,**kwargs):
"""
NAME:
EccZmaxRperiRap (_EccZmaxRperiRap)
PURPOSE:
evaluate the eccentricity, maximum height above the plane, peri- and apocenter in the Staeckel approximation
INPUT:
Either:
a) R,vR,vT,z,vz[,phi]:
1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
delta= (object-wide default) can be used to override the object-wide focal length; can also be an array with length N to allow different delta for different phase-space points
u0= (None) if object-wide option useu0 is set, u0 to use (if useu0 and useu0 is None, a good value will be computed)
c= (object-wide default, bool) True/False to override the object-wide setting for whether or not to use the C implementation
OUTPUT:
(e,zmax,rperi,rap)
HISTORY:
2017-12-12 - Written - Bovy (UofT)
"""
delta= kwargs.get('delta',self._delta)
umin, umax, vmin= self._uminumaxvmin(*args,**kwargs)
rperi= bovy_coords.uv_to_Rz(umin,nu.pi/2.,delta=delta)[0]
rap_tmp, zmax= bovy_coords.uv_to_Rz(umax,vmin,delta=delta)
rap= nu.sqrt(rap_tmp**2.+zmax**2.)
e= (rap-rperi)/(rap+rperi)
return (e,zmax,rperi,rap) |
python | def parse_events(cls, ev_args, parent_ctx):
"""
Capture the events sent to :meth:`.XSO.parse_events`,
including the initial `ev_args` to a list and call
:meth:`_set_captured_events` on the result of
:meth:`.XSO.parse_events`.
Like the method it overrides, :meth:`parse_events` is suspendable.
"""
dest = [("start", )+tuple(ev_args)]
result = yield from capture_events(
super().parse_events(ev_args, parent_ctx),
dest
)
result._set_captured_events(dest)
return result |
python | def build_return_url(self):
'''
If the Tool Consumer sent a return URL, add any set messages to the
URL.
'''
if not self.launch_presentation_return_url:
return None
lti_message_fields = ['lti_errormsg', 'lti_errorlog',
'lti_msg', 'lti_log']
messages = dict([(key, getattr(self, key))
for key in lti_message_fields
if getattr(self, key, None)])
# Disassemble original return URL and reassemble with our options added
original = urlsplit(self.launch_presentation_return_url)
combined = messages.copy()
combined.update(dict(parse_qsl(original.query)))
combined_query = urlencode(combined)
return urlunsplit((
original.scheme,
original.netloc,
original.path,
combined_query,
original.fragment
)) |
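
A standalone sketch of the reassembly step, using only the Python 3 stdlib (the original imports urlsplit etc. from elsewhere); the URL and messages below are illustrative:

```python
# Merge LTI message params into the Tool Consumer's return URL; params
# already present in the original query string take precedence.
from urllib.parse import urlsplit, urlunsplit, urlencode, parse_qsl

return_url = "https://lms.example.com/return?course=42"
messages = {"lti_msg": "Saved", "lti_log": "ok"}

parts = urlsplit(return_url)
combined = dict(messages)
combined.update(dict(parse_qsl(parts.query)))   # original params override
print(urlunsplit((parts.scheme, parts.netloc, parts.path,
                  urlencode(combined), parts.fragment)))
```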
java | private void setRequestLanguages(WbGetEntitiesActionData properties) {
if (this.filter.excludeAllLanguages()
|| this.filter.getLanguageFilter() == null) {
return;
}
properties.languages = ApiConnection.implodeObjects(this.filter
.getLanguageFilter());
} |
java | @Override
public final void setHasName(final CatalogGs pHasName) {
this.hasName = pHasName;
if (this.itsId == null) {
this.itsId = new IdI18nCatalogGs();
}
this.itsId.setHasName(this.hasName);
} |
python | def p_ExtendedAttributeArgList(p):
"""ExtendedAttributeArgList : IDENTIFIER "(" ArgumentList ")"
"""
p[0] = model.ExtendedAttribute(
value=model.ExtendedAttributeValue(name=p[1], arguments=p[3])) |
java | void storeBlock(BlockId blockId, ByteBuffer block) throws IOException {
synchronized (m_accessLock) {
if (m_blockPathMap.containsKey(blockId)) {
throw new IllegalArgumentException("Request to store block that is already stored: "
+ blockId.toString());
}
int origPosition = block.position();
block.position(0);
Path blockPath = makeBlockPath(blockId);
try (SeekableByteChannel channel = Files.newByteChannel(blockPath, OPEN_OPTIONS, PERMISSIONS)) {
channel.write(block);
}
finally {
block.position(origPosition);
}
m_blockPathMap.put(blockId, blockPath);
}
} |
python | def _writeSentenceInBlock(sentence, blockID, sentenceID):
'''writes the sentence in a block to a file with the id'''
with open("sentenceIDs.txt", "a") as fp:
fp.write("sentenceID: "+str(blockID)+"_"+str(sentenceID)+"\n")
fp.write("sentence string: "+sentence+"\n")
fp.write("\n") |
java | public final void mLCURLY() throws RecognitionException {
try {
int _type = LCURLY;
int _channel = DEFAULT_TOKEN_CHANNEL;
// druidG.g:575:8: ( '{' )
// druidG.g:575:11: '{'
{
match('{');
}
state.type = _type;
state.channel = _channel;
}
finally {
// do for sure before leaving
}
} |
python | def id_lookup(paper_id, idtype):
"""Take an ID of type PMID, PMCID, or DOI and lookup the other IDs.
If the DOI is not found in Pubmed, try to obtain the DOI by doing a
reverse-lookup of the DOI in CrossRef using article metadata.
Parameters
----------
paper_id : str
ID of the article.
idtype : str
Type of the ID: 'pmid', 'pmcid', or 'doi
Returns
-------
ids : dict
A dictionary with the following keys: pmid, pmcid and doi.
"""
if idtype not in ('pmid', 'pmcid', 'doi'):
raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', "
"or 'doi'." % idtype)
ids = {'doi': None, 'pmid': None, 'pmcid': None}
pmc_id_results = pmc_client.id_lookup(paper_id, idtype)
# Start with the results of the PMC lookup and then override with the
# provided ID
ids['pmid'] = pmc_id_results.get('pmid')
ids['pmcid'] = pmc_id_results.get('pmcid')
ids['doi'] = pmc_id_results.get('doi')
ids[idtype] = paper_id
# If we gave a DOI, then our work is done after looking for PMID and PMCID
if idtype == 'doi':
return ids
# If we gave a PMID or PMCID, we need to check to see if we got a DOI.
# If we got a DOI back, we're done.
elif ids.get('doi'):
return ids
# If we get here, then we've given PMID or PMCID and don't have a DOI yet.
# If we gave a PMCID and have neither a PMID nor a DOI, then we'll run
# into problems later on when we try to the reverse lookup using CrossRef.
# So we bail here and return what we have (PMCID only) with a warning.
if ids.get('pmcid') and ids.get('doi') is None and ids.get('pmid') is None:
logger.warning('%s: PMCID without PMID or DOI' % ids.get('pmcid'))
return ids
# To clarify the state of things at this point:
assert ids.get('pmid') is not None
assert ids.get('doi') is None
# As a last result, we try to get the DOI from CrossRef (which internally
# tries to get the DOI from Pubmed in the process of collecting the
# necessary metadata for the lookup):
ids['doi'] = crossref_client.doi_query(ids['pmid'])
# It may still be None, but at this point there's nothing we can do...
return ids |
python | def _get_all_offsets(self, offset_ns=None):
"""
returns all token offsets of this document as a generator of
(token node ID str, character onset int, character offset int) tuples.
Parameters
----------
offset_ns : str or None
The namespace from which the offsets will be retrieved. If no
namespace is given, the default namespace of this document graph is
chosen
Returns
-------
offsets : generator(tuple(str, int, int))
a generator of (token node ID str, character onset int, character
offset int) tuples, which represents all the tokens in the order
they occur in the document.
"""
for token_id, _token_str in self.get_tokens():
onset = self.node[token_id]['{0}:{1}'.format(offset_ns, 'onset')]
offset = self.node[token_id]['{0}:{1}'.format(offset_ns, 'offset')]
yield (token_id, onset, offset) |
python | def WriteSignedBinary(binary_urn,
binary_content,
private_key,
public_key,
chunk_size = 1024,
token = None):
"""Signs a binary and saves it to the datastore.
If a signed binary with the given URN already exists, its contents will get
overwritten.
Args:
binary_urn: URN that should serve as a unique identifier for the binary.
binary_content: Contents of the binary, as raw bytes.
private_key: Key that should be used for signing the binary contents.
public_key: Key that should be used to verify the signature generated using
the private key.
chunk_size: Size, in bytes, of the individual blobs that the binary contents
will be split to before saving to the datastore.
token: ACL token to use with the legacy (non-relational) datastore.
"""
if _ShouldUseLegacyDatastore():
collects.GRRSignedBlob.NewFromContent(
binary_content,
binary_urn,
chunk_size=chunk_size,
token=token,
private_key=private_key,
public_key=public_key)
if data_store.RelationalDBEnabled():
blob_references = rdf_objects.BlobReferences()
for chunk_offset in range(0, len(binary_content), chunk_size):
chunk = binary_content[chunk_offset:chunk_offset + chunk_size]
blob_rdf = rdf_crypto.SignedBlob()
blob_rdf.Sign(chunk, private_key, verify_key=public_key)
blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(
blob_rdf.SerializeToString())
blob_references.items.Append(
rdf_objects.BlobReference(
offset=chunk_offset, size=len(chunk), blob_id=blob_id))
data_store.REL_DB.WriteSignedBinaryReferences(
_SignedBinaryIDFromURN(binary_urn), blob_references) |
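
The offset/size bookkeeping of the chunk loop can be seen in isolation below; hashlib stands in for the GRR-internal signing and blob-store calls, which are assumptions here, not the real API:

```python
# Minimal sketch of the chunk-and-sign loop in WriteSignedBinary();
# hashlib.sha256 is a stand-in for the RDF signing + blob-store write.
import hashlib

binary_content = b"x" * 2500
chunk_size = 1024
references = []
for offset in range(0, len(binary_content), chunk_size):
    chunk = binary_content[offset:offset + chunk_size]
    blob_id = hashlib.sha256(chunk).hexdigest()   # stand-in for blob store
    references.append({"offset": offset, "size": len(chunk), "blob_id": blob_id})

print([(r["offset"], r["size"]) for r in references])
# [(0, 1024), (1024, 1024), (2048, 452)]
```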
python | def image_from_file(filename,
remote_addr=None,
cert=None,
key=None,
verify_cert=True,
aliases=None,
public=False,
saltenv='base',
_raw=False):
''' Create an image from a file
filename :
The filename of the rootfs
remote_addr :
A URL to a remote server; you also have to give cert and key if
you provide remote_addr and it's a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert; this is True by default,
but in most cases you will want to turn it off, as LXD
normally uses self-signed certificates.
aliases : []
List of aliases to append to the copied image
public : False
Make this image publicly available
saltenv : base
The saltenv to use for salt:// copies
_raw : False
Return the raw pylxd object or a dict of the image?
CLI Examples:
..code-block:: bash
$ salt '*' lxd.image_from_file salt://lxd/files/busybox.tar.xz aliases=["busybox-amd64"]
'''
if aliases is None:
aliases = []
cached_file = __salt__['cp.cache_file'](filename, saltenv=saltenv)
data = b''
with salt.utils.files.fopen(cached_file, 'r+b') as fp:
data = fp.read()
client = pylxd_client_get(remote_addr, cert, key, verify_cert)
try:
image = client.images.create(data, public=public, wait=True)
except pylxd.exceptions.LXDAPIException as e:
raise CommandExecutionError(six.text_type(e))
# Aliases support
for alias in aliases:
image_alias_add(image, alias)
if _raw:
return image
return _pylxd_model_to_dict(image) |
java | static void Print(AddressBook addressBook) {
for (Person person: addressBook.getPeopleList()) {
System.out.println("Person ID: " + person.getId());
System.out.println(" Name: " + person.getName());
if (!person.getEmail().isEmpty()) {
System.out.println(" E-mail address: " + person.getEmail());
}
for (Person.PhoneNumber phoneNumber : person.getPhonesList()) {
switch (phoneNumber.getType()) {
case MOBILE:
System.out.print(" Mobile phone #: ");
break;
case HOME:
System.out.print(" Home phone #: ");
break;
case WORK:
System.out.print(" Work phone #: ");
break;
}
System.out.println(phoneNumber.getNumber());
}
}
} |
python | def mqp_lm1b_base():
"""Series of architectures for language modeling."""
hparams = mtf_transformer2.mtf_unitransformer_base()
hparams.d_model = 1024
hparams.max_length = 256
hparams.batch_size = 256
# Parameters for my_layer_stack()
hparams.num_hidden_layers = 6
hparams.d_ff = 8192
hparams.d_kv = 128
hparams.num_heads = 8
hparams.learning_rate_decay_steps = 13600
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:32"
return hparams |
python | def reindex(report):
"""Reindex report so that 'TOTAL' is the last row"""
index = list(report.index)
i = index.index('TOTAL')
return report.reindex(index[:i] + index[i+1:] + ['TOTAL']) |
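
A usage sketch, assuming pandas and the reindex function above: 'TOTAL' moves to the end regardless of where it appears in the index.

```python
import pandas as pd

report = pd.DataFrame({"count": [3, 9, 6]}, index=["a", "TOTAL", "b"])
print(reindex(report).index.tolist())  # ['a', 'b', 'TOTAL']
```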
python | def load_velo_scan(file):
"""Load and parse a velodyne binary file."""
scan = np.fromfile(file, dtype=np.float32)
return scan.reshape((-1, 4)) |
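
A usage sketch, assuming a KITTI-style binary of float32 (x, y, z, reflectance) quadruples; the temp path and dummy data are illustrative:

```python
import numpy as np

pts = np.arange(12, dtype=np.float32)
pts.tofile("/tmp/demo_scan.bin")              # write a 3-point dummy scan
scan = load_velo_scan("/tmp/demo_scan.bin")
print(scan.shape)                             # (3, 4)
```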
java | protected void openEditDialog(boolean isNew, String mode) {
// create a form to submit a post request to the editor JSP
Map<String, String> formValues = new HashMap<String, String>();
if (m_editableData.getSitePath() != null) {
formValues.put("resource", m_editableData.getSitePath());
}
if (m_editableData.getElementLanguage() != null) {
formValues.put("elementlanguage", m_editableData.getElementLanguage());
}
if (m_editableData.getElementName() != null) {
formValues.put("elementname", m_editableData.getElementName());
}
String backlink = CmsCoreProvider.get().getUri();
if (Window.Location.getPath().endsWith(backlink)) {
// CmsCoreProvider.get().getUri() is the request context uri from the time the direct edit provider
// includes are generated. In case the template has changed the request context uri before that point,
// we don't append the request parameters, as they may be inappropriate for the new URI.
backlink += Window.Location.getQueryString();
}
formValues.put("backlink", backlink);
formValues.put("redirect", "true");
formValues.put("directedit", "true");
formValues.put("nofoot", "1");
formValues.put("editcontext", CmsCoreProvider.get().getUri());
String postCreateHandler = m_editableData.getPostCreateHandler();
if (postCreateHandler != null) {
formValues.put(CmsEditorConstants.PARAM_POST_CREATE_HANDLER, postCreateHandler);
}
if (mode != null) {
formValues.put(CmsEditorConstants.PARAM_MODE, mode);
}
if (isNew) {
formValues.put("newlink", m_editableData.getNewLink());
formValues.put("editortitle", m_editableData.getNewTitle());
}
FormElement formElement = CmsDomUtil.generateHiddenForm(
CmsCoreProvider.get().link(CmsCoreProvider.get().getContentEditorUrl()),
Method.post,
Target.TOP,
formValues);
getMarkerTag().appendChild(formElement);
formElement.submit();
} |
python | def get_my_item_id_from_section(self, section):
"""returns the first item associated with this magic Part Id in the Section"""
for question_map in section._my_map['questions']:
if question_map['assessmentPartId'] == str(self.get_id()):
return section.get_question(question_map=question_map).get_id()
raise IllegalState('This Part currently has no Item in the Section') |
java | public Observable<Page<VirtualNetworkInner>> listByResourceGroupAsync(final String resourceGroupName) {
return listByResourceGroupWithServiceResponseAsync(resourceGroupName)
.map(new Func1<ServiceResponse<Page<VirtualNetworkInner>>, Page<VirtualNetworkInner>>() {
@Override
public Page<VirtualNetworkInner> call(ServiceResponse<Page<VirtualNetworkInner>> response) {
return response.body();
}
});
} |
java | private String getPrefix()
{
final String prefix;
if (loader.isPresent())
{
prefix = loader.get().getPackage().getName().replace(Constant.DOT, File.separator);
}
else
{
prefix = resourcesDir;
}
return prefix;
} |
java | private int getDatastreamPaneIndex(String id) {
int index = -1;
for (int i=0; i < m_datastreamPanes.length; i++)
{
if(m_datastreamPanes[i].getItemId().equals(id)){
index = i;
break;
}
}
return index;
} |
python | def execute(self, X):
"""Execute the program according to X.
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
y_hats : array-like, shape = [n_samples]
The result of executing the program on X.
"""
# Check for single-node programs
node = self.program[0]
if isinstance(node, float):
return np.repeat(node, X.shape[0])
if isinstance(node, int):
return X[:, node]
apply_stack = []
for node in self.program:
if isinstance(node, _Function):
apply_stack.append([node])
else:
# Lazily evaluate later
apply_stack[-1].append(node)
while len(apply_stack[-1]) == apply_stack[-1][0].arity + 1:
# Apply functions that have sufficient arguments
function = apply_stack[-1][0]
terminals = [np.repeat(t, X.shape[0]) if isinstance(t, float)
else X[:, t] if isinstance(t, int)
else t for t in apply_stack[-1][1:]]
intermediate_result = function(*terminals)
if len(apply_stack) != 1:
apply_stack.pop()
apply_stack[-1].append(intermediate_result)
else:
return intermediate_result
# We should never get here
return None |
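
The apply_stack mechanism is easier to see on a toy prefix program; the Func namedtuple and run helper below are illustrative stand-ins for _Function and the program list, not part of the original API:

```python
# Stack-based evaluation of a flat prefix program, as in execute():
# arguments accumulate under each function until it is saturated (arity
# reached), then it is applied and its result feeds the enclosing call.
from collections import namedtuple

Func = namedtuple("Func", ["fn", "arity"])
ADD = Func(lambda a, b: a + b, 2)
MUL = Func(lambda a, b: a * b, 2)

def run(program):
    stack = []
    for node in program:
        if isinstance(node, Func):
            stack.append([node])          # open a new pending call
        else:
            stack[-1].append(node)        # supply an argument
        while len(stack[-1]) == stack[-1][0].arity + 1:
            fn = stack[-1][0]
            result = fn.fn(*stack[-1][1:])
            if len(stack) == 1:
                return result
            stack.pop()
            stack[-1].append(result)      # result becomes an argument

print(run([ADD, 1.0, MUL, 2.0, 3.0]))     # 1 + (2 * 3) = 7.0
```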
java | public Option defaultValue(String defaultValue) throws RequiredParametersException {
if (this.choices.isEmpty()) {
return this.setDefaultValue(defaultValue);
} else {
if (this.choices.contains(defaultValue)) {
return this.setDefaultValue(defaultValue);
} else {
throw new RequiredParametersException("Default value " + defaultValue +
" is not in the list of valid values for option " + this.longName);
}
}
} |
java | @com.fasterxml.jackson.annotation.JsonProperty("ErrorDetails")
public java.util.List<ErrorDetail> getErrorDetails() {
return errorDetails;
} |
python | def process_superclass(self, entity: List[dict]) -> List[dict]:
""" Replaces ILX ID with superclass ID """
superclass = entity.pop('superclass')
label = entity['label']
if not superclass.get('ilx_id'):
raise self.SuperClassDoesNotExistError(
f'Superclass not given an interlex ID for label: {label}')
superclass_data = self.get_entity(superclass['ilx_id'])
if not superclass_data['id']:
raise self.SuperClassDoesNotExistError(
'Superclass ILX ID: ' + superclass['ilx_id'] + ' does not exist in SciCrunch')
# BUG: only accepts superclass_tid
entity['superclasses'] = [{'superclass_tid': superclass_data['id']}]
return entity |
python | def rotate2d(self, theta, origin=None, axis='z', radians=False):
'''
:theta: float angle to rotate self around origin (degrees unless
:radians: is True)
:origin: optional Point, defaults to 0,0,0
:axis: optional str, one of 'x', 'y' or 'z' (default 'z')
:radians: optional bool, interpret :theta: as radians
Returns a Point rotated by :theta: around :origin:.
'''
origin = Point._convert(origin)
delta = self - origin
p = Point(origin)
if not radians:
theta = math.radians(theta)
cosT = math.cos(theta)
sinT = math.sin(theta)
if axis == 'z':
p.x += (cosT * delta.x) - (sinT * delta.y)
p.y += (sinT * delta.x) + (cosT * delta.y)
return p
if axis == 'y':
p.z += (cosT * delta.z) - (sinT * delta.x)
p.x += (sinT * delta.z) + (cosT * delta.x)
return p
if axis == 'x':
p.y += (cosT * delta.y) - (sinT * delta.z)
p.z += (sinT * delta.y) + (cosT * delta.z)
return p
raise KeyError('unknown axis {}, expecting x, y or z'.format(axis)) |
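
A usage sketch, assuming the surrounding Point class accepts Point(x, y, z): rotating (1, 0, 0) by 90 degrees about the z axis yields (0, 1, 0) up to float error.

```python
p = Point(1, 0, 0)           # assumes a Point(x, y, z) constructor
q = p.rotate2d(90, axis='z')
print(round(q.x, 9), round(q.y, 9))  # 0.0 1.0
```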
java | public static synchronized short getNearestColor(
final Color awtColor) {
if (triplets == null) {
triplets = HSSFColor.getTripletHash();
}
if (triplets == null || triplets.isEmpty()) {
System.out.println("Unable to get triplet hashtable");
return HSSFColor.BLACK.index;
}
short color = HSSFColor.BLACK.index;
double minDiff = Double.MAX_VALUE;
// get the color without the alpha chanel
final float[] hsb = Color.RGBtoHSB(awtColor.getRed(), awtColor
.getGreen(), awtColor.getBlue(), null);
float[] excelHsb = null;
final Iterator elements = triplets.values().iterator();
while (elements.hasNext()) {
final HSSFColor crtColor = (HSSFColor) elements.next();
final short[] rgb = crtColor.getTriplet();
excelHsb = Color.RGBtoHSB(rgb[0], rgb[1], rgb[2], excelHsb);
final double weight = 3.0d * Math.abs(excelHsb[0] - hsb[0])
+ Math.abs(excelHsb[1] - hsb[1])
+ Math.abs(excelHsb[2] - hsb[2]);
if (weight < minDiff) {
minDiff = weight;
if (minDiff == 0) {
// we found the color ...
return crtColor.getIndex();
}
color = crtColor.getIndex();
}
}
return color;
} |
python | def render_html(input_text, **context):
"""
A module-level convenience method that creates a default bbcode parser,
and renders the input string as HTML.
"""
global g_parser
if g_parser is None:
g_parser = Parser()
return g_parser.format(input_text, **context) |
python | def set_backup_heartbeat(self, interface_id):
"""
Set this interface as the backup heartbeat interface.
Clusters and Master NGFW Engines only.
:param str,int interface_id: interface as backup
:raises InterfaceNotFound: specified interface is not found
:raises UpdateElementFailed: failure to update interface
:return: None
"""
self.interface.set_unset(interface_id, 'backup_heartbeat')
self._engine.update() |
python | def hashjoin(left, right, key=None, lkey=None, rkey=None, cache=True,
lprefix=None, rprefix=None):
"""Alternative implementation of :func:`petl.transform.joins.join`, where
the join is executed by constructing an in-memory lookup for the right
hand table, then iterating over rows from the left hand table.
May be faster and/or more resource efficient where the right table is small
and the left table is large.
By default data from right hand table is cached to improve performance
(only available when `key` is given).
Left and right tables with different key fields can be handled via the
`lkey` and `rkey` arguments.
"""
lkey, rkey = keys_from_args(left, right, key, lkey, rkey)
return HashJoinView(left, right, lkey=lkey, rkey=rkey, cache=cache,
lprefix=lprefix, rprefix=rprefix) |
python | def rgb2hex(rgb):
"""
Convert RGB(A) tuple to hex.
"""
if len(rgb) > 3:
rgb = rgb[:-1]
return "#{0:02x}{1:02x}{2:02x}".format(*(int(v*255) for v in rgb)) |
python | def setup(parser):
"""Add common sampling options to CLI parser.
Parameters
----------
parser : argparse object
Returns
----------
Updated argparse object
"""
parser.add_argument(
'-p', '--paramfile', type=str, required=True,
help='Parameter Range File')
parser.add_argument(
'-o', '--output', type=str, required=True, help='Output File')
parser.add_argument(
'-s', '--seed', type=int, required=False, default=None,
help='Random Seed')
parser.add_argument(
'--delimiter', type=str, required=False, default=' ',
help='Column delimiter')
parser.add_argument('--precision', type=int, required=False,
default=8, help='Output floating-point precision')
return parser |
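
A usage sketch wiring setup() into a fresh parser and parsing a sample command line:

```python
import argparse

parser = setup(argparse.ArgumentParser())
args = parser.parse_args(['-p', 'params.txt', '-o', 'samples.txt', '-s', '42'])
print(args.paramfile, args.output, args.seed)  # params.txt samples.txt 42
```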
java | @SuppressWarnings("SameParameterValue") // Using same method params as in restrictStateBounds
@Nullable
State restrictStateBoundsCopy(State state, State prevState, float pivotX, float pivotY,
boolean allowOverscroll, boolean allowOverzoom, boolean restrictRotation) {
tmpState.set(state);
boolean changed = restrictStateBounds(tmpState, prevState, pivotX, pivotY,
allowOverscroll, allowOverzoom, restrictRotation);
return changed ? tmpState.copy() : null;
} |
python | def extendInformation(self, response):
"""
This extends the object's stdout and stderr with the
response's stdout and stderr.
"""
if response.stdout:
self.stdout += '\r\n' + response.stdout
if response.stderr:
self.stderr += '\r\n' + response.stderr |
java | protected void writeOperand (final float real) throws IOException
{
final int byteCount = NumberFormatUtil.formatFloatFast (real,
formatDecimal.getMaximumFractionDigits (),
formatBuffer);
if (byteCount == -1)
{
// Fast formatting failed
write (formatDecimal.format (real));
}
else
{
m_aOS.write (formatBuffer, 0, byteCount);
}
m_aOS.write (' ');
} |
python | def _find_classes_param(self):
"""
Searches the wrapped model for the classes_ parameter.
"""
for attr in ["classes_"]:
try:
return getattr(self.estimator, attr)
except AttributeError:
continue
raise YellowbrickTypeError(
"could not find classes_ param on {}".format(
self.estimator.__class__.__name__
)
) |
python | def splittermixerfieldlists(data, commdct, objkey):
"""docstring for splittermixerfieldlists"""
objkey = objkey.upper()
objindex = data.dtls.index(objkey)
objcomms = commdct[objindex]
theobjects = data.dt[objkey]
fieldlists = []
for theobject in theobjects:
fieldlist = list(range(1, len(theobject)))
fieldlists.append(fieldlist)
return fieldlists |
java | static void addToRecentConnectionSettings(Hashtable settings,
ConnectionSetting newSetting) throws IOException {
settings.put(newSetting.getName(), newSetting);
ConnectionDialogCommon.storeRecentConnectionSettings(settings);
} |
java | public com.google.privacy.dlp.v2.PrivacyMetric.LDiversityConfigOrBuilder
getLDiversityConfigOrBuilder() {
if (typeCase_ == 4) {
return (com.google.privacy.dlp.v2.PrivacyMetric.LDiversityConfig) type_;
}
return com.google.privacy.dlp.v2.PrivacyMetric.LDiversityConfig.getDefaultInstance();
} |
python | def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True |
python | def Header(self):
"""
Get the block header.
Returns:
neo.Core.Header:
"""
if not self._header:
self._header = Header(self.PrevHash, self.MerkleRoot, self.Timestamp,
self.Index, self.ConsensusData, self.NextConsensus, self.Script)
return self._header |
java | public String getRemoteAddr()
{
String addr = "127.0.0.1";
HttpConnection connection = getHttpConnection();
if (connection != null)
{
addr = connection.getRemoteAddr();
if (addr == null) addr = connection.getRemoteHost();
}
return addr;
} |
python | def get_torque_state(self):
""" get the torque state of motor
Returns:
bool: True if torque is enabled, else False
"""
data = []
data.append(0x09)
data.append(self.servoid)
data.append(RAM_READ_REQ)
data.append(TORQUE_CONTROL_RAM)
data.append(BYTE2)
send_data(data)
rxdata = []
try:
rxdata = SERPORT.read(13)
return bool(ord(rxdata[9]))
except HerkulexError:
raise HerkulexError("could not communicate with motors") |
java | public static boolean setCurrentFile(File databaseDir, long descriptorNumber)
throws IOException
{
String manifest = descriptorFileName(descriptorNumber);
String temp = tempFileName(descriptorNumber);
File tempFile = new File(databaseDir, temp);
writeStringToFileSync(manifest + "\n", tempFile);
File to = new File(databaseDir, currentFileName());
boolean ok = tempFile.renameTo(to);
if (!ok) {
tempFile.delete();
writeStringToFileSync(manifest + "\n", to);
}
return ok;
} |
java | @Override
public List<com.enioka.jqm.api.Deliverable> getJobDeliverables(int idJob)
{
DbConn cnx = null;
try
{
cnx = getDbSession();
// TODO: no intermediate entity here: directly SQL => API object.
List<Deliverable> deliverables = Deliverable.select(cnx, "deliverable_select_all_for_ji", idJob);
List<com.enioka.jqm.api.Deliverable> res = new ArrayList<>();
for (Deliverable d : deliverables)
{
res.add(new com.enioka.jqm.api.Deliverable(d.getFilePath(), d.getFileFamily(), d.getId(), d.getOriginalFileName()));
}
return res;
}
catch (Exception e)
{
throw new JqmClientException("Could not query files for job instance " + idJob, e);
}
finally
{
closeQuietly(cnx);
}
} |
java | public ValueInterval<T, I, V> withValue(V value) {
return new ValueInterval<>(this.interval, value);
} |
java | public void setTime(final long millis) {
int millisOfDay = ISOChronology.getInstanceUTC().millisOfDay().get(millis);
setMillis(getChronology().millisOfDay().set(getMillis(), millisOfDay));
} |
python | def _call(callback, args=[], kwargs={}):
"""
Calls a callback with optional args and keyword args lists. This method exists so
we can inspect the `_max_calls` attribute that's set by `_on`. If this value is None,
the callback is considered to have no limit. Otherwise, an integer value is expected
and decremented until there are no remaining calls
"""
if not hasattr(callback, '_max_calls'):
callback._max_calls = None
# None implies no callback limit
if callback._max_calls is None:
return _call_partial(callback, *args, **kwargs)
# Should the signal be disconnected?
if callback._max_calls <= 0:
return disconnect(callback)
callback._max_calls -= 1
return _call_partial(callback, *args, **kwargs) |
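
A self-contained sketch of the same call-budget pattern; call_limited and greet are hypothetical names, and returning None stands in for disconnect():

```python
# A callback carries a _max_calls counter: None means unlimited,
# zero or below means the callback is dropped instead of called.
def call_limited(callback, *args, **kwargs):
    if getattr(callback, '_max_calls', None) is None:
        return callback(*args, **kwargs)      # no limit set
    if callback._max_calls <= 0:
        return None                           # stand-in for disconnect()
    callback._max_calls -= 1
    return callback(*args, **kwargs)

def greet(name):
    return "hi " + name

greet._max_calls = 2
print([call_limited(greet, "a") for _ in range(4)])
# ['hi a', 'hi a', None, None]
```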
java | @Override
public <T> T convertValues(Collection<String> input, Class<T> rawType, Type type, String defaultValue) throws IllegalArgumentException {
if (rawType.isArray()) {
if (input == null) {
input = getMultipleValues(defaultValue, null);
}
return createArray(input, rawType.getComponentType());
} else if (Collection.class.isAssignableFrom(rawType)) {
if (input == null) {
input = getMultipleValues(defaultValue, null);
}
return createCollection(input, rawType, type);
} else {
return convertSingleValue(input, rawType, defaultValue);
}
} |
java | public boolean removeCustomer(Long customerId) {
Customer customer = getCustomer(customerId);
if(customer != null){
customerRepository.delete(customer);
return true;
}
return false;
} |
python | def connect(self):
"""Connect to wdb server"""
log.info('Connecting socket on %s:%d' % (self.server, self.port))
tries = 0
while not self._socket and tries < 10:
try:
time.sleep(.2 * tries)
self._socket = Socket((self.server, self.port))
except socket.error:
tries += 1
log.warning(
'You must start/install wdb.server '
'(Retrying on %s:%d) [Try #%d/10]' %
(self.server, self.port, tries)
)
self._socket = None
if not self._socket:
log.warning('Could not connect to server')
return
Wdb._sockets.append(self._socket)
self._socket.send_bytes(self.uuid.encode('utf-8')) |
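
A standalone sketch of the linear-backoff retry loop, using plain stdlib sockets; the host, port and try count are illustrative:

```python
import socket
import time

def connect_with_retry(host, port, tries=3):
    for attempt in range(tries):
        try:
            time.sleep(0.2 * attempt)       # back off a little more each try
            return socket.create_connection((host, port))
        except OSError:
            pass                            # refused/unreachable: retry
    return None                             # all attempts failed

sock = connect_with_retry("127.0.0.1", 19840)
print("connected" if sock else "could not connect to server")
```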