language (stringclasses, 2 values) | func_code_string (stringlengths, 63–466k)
---|---
java | public void setRef(Constituent v) {
if (PTBConstituent_Type.featOkTst && ((PTBConstituent_Type)jcasType).casFeat_ref == null)
jcasType.jcas.throwFeatMissing("ref", "de.julielab.jules.types.PTBConstituent");
jcasType.ll_cas.ll_setRefValue(addr, ((PTBConstituent_Type)jcasType).casFeatCode_ref, jcasType.ll_cas.ll_getFSRef(v));} |
java | public FileSystemDataset load (final State state) {
return new FileSystemDataset() {
@Override
public Path datasetRoot() {
return new Path(state.getProp(SERIALIZE_COMPACTION_FILE_PATH_NAME));
}
@Override
public String datasetURN() {
return state.getProp(SERIALIZE_COMPACTION_FILE_PATH_NAME);
}
};
} |
java | @Override
public final void setJMSDeliveryMode(int deliveryMode) throws JMSException
{
if (deliveryMode != DeliveryMode.PERSISTENT &&
deliveryMode != DeliveryMode.NON_PERSISTENT)
throw new FFMQException("Invalid delivery mode : "+deliveryMode,"INVALID_DELIVERY_MODE");
assertDeserializationLevel(MessageSerializationLevel.FULL);
this.deliveryMode = deliveryMode;
} |
java | public void matches(Session other) throws ClientException {
compareTransferParams(other);
compareServerMode(other);
if (needsGridFTP() && transferMode != MODE_EBLOCK) {
throw new ClientException(ClientException.BAD_MODE,
"Extended block mode necessary");
}
if (other instanceof GridFTPSession &&
((GridFTPSession) other).needsGridFTP() &&
transferMode != MODE_EBLOCK) {
throw new ClientException(ClientException.BAD_MODE,
"Extended block mode necessary");
}
} |
python | def read_frame(self):
"""
Read an AMQP frame.
"""
frame_type, channel, size = unpack('>BHI', self._read(7))
payload = self._read(size)
ch = ord(self._read(1))
if ch == 206: # '\xce'
return frame_type, channel, payload
else:
raise Exception('Framing Error, received 0x%02x while expecting 0xce' % ch) |
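The `'>BHI'` format in the snippet above is the AMQP 0-9-1 frame header: a one-byte frame type, a two-byte channel id, and a four-byte payload size (big-endian), with the payload followed by a single frame-end octet `0xCE` (206). A minimal round-trip sketch with the standard library; the helper names are illustrative, not part of the snippet's class:

```python
import struct

def build_frame(frame_type, channel, payload):
    # 7-byte header (type: B, channel: H, size: I), payload, frame-end 0xCE
    return struct.pack('>BHI', frame_type, channel, len(payload)) + payload + b'\xce'

def parse_frame(raw):
    frame_type, channel, size = struct.unpack('>BHI', raw[:7])
    payload = raw[7:7 + size]
    if raw[7 + size] != 0xCE:
        raise ValueError('Framing Error, expected 0xce frame-end octet')
    return frame_type, channel, payload

assert parse_frame(build_frame(1, 0, b'hello')) == (1, 0, b'hello')
```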
java | @Pure
public static URL findLibraryURL(String libName) {
return findLibraryURL(null, libName, null, null);
} |
python | def filter(self,
pos_condition=None,
stopwords=None,
is_normalize=True,
func_normalizer=normalize_text,
check_field_name='stem'):
# type: (List[Tuple[text_type,...]], List[text_type], bool, Callable[[text_type], text_type],text_type)->FilteredObject
"""* What you can do
- It filters out tokens which do NOT meet the conditions (stopwords & part-of-speech tags)
- Under python2.x, pos_condition & stopwords are converted into unicode type.
* Parameters
- pos_condition: list of part-of-speech (pos) conditions. Each pos condition is a tuple of variable length.
You can specify a hierarchical pos condition with a variable-length tuple.
The hierarchy of the pos condition follows the dictionary's definition.
- For example, in mecab you can take words tagged 名詞 with ('名詞',)
- For example, in mecab you can take words tagged 名詞-固有名詞 with ('名詞', '固有名詞')
- stopwords: list of words which you would like to remove
- is_normalize: Boolean flag for normalizing stopwords.
- func_normalizer: Function object for normalization. It must be the same function used when you tokenize.
- check_field_name: Field name to check for stopwords. Kytea does not have a stem form of a word, so put 'surface' instead.
* Example
>>> pos_condition = [('名詞', '一般'), ('形容詞', '自立'), ('助詞', '格助詞', '一般')]
>>> stopwords = ['これ', 'それ']
"""
assert isinstance(pos_condition, (type(None), list))
assert isinstance(stopwords, (type(None), list))
if stopwords is None:
s_words = []
elif six.PY2 and all((isinstance(s, str) for s in stopwords)):
"""under python2.x, from str into unicode"""
if is_normalize:
s_words = [func_normalizer(s.decode(self.string_encoding)) for s in stopwords]
else:
s_words = [s.decode(self.string_encoding) for s in stopwords]
else:
if is_normalize:
s_words = [func_normalizer(s) for s in stopwords]
else:
s_words = stopwords
if pos_condition is None:
p_condition = []
else:
p_condition = self.__check_pos_condition(pos_condition)
filtered_object = filter_words(
tokenized_obj=self,
valid_pos=p_condition,
stopwords=s_words,
check_field_name=check_field_name
)
assert isinstance(filtered_object, FilteredObject)
return filtered_object |
java | public void destroy() {
try {
super.destroy();
} finally {
if (defaultJedisConnector != null && myOwnRedis) {
try {
defaultJedisConnector.destroy();
} catch (Exception e) {
LOGGER.warn(e.getMessage(), e);
} finally {
defaultJedisConnector = null;
}
}
}
} |
java | public Task<AwsSesSendResult> sendEmail(
@NonNull final String to,
@NonNull final String from,
@NonNull final String subject,
@NonNull final String body) {
return dispatcher.dispatchTask(new Callable<AwsSesSendResult>() {
@Override
public AwsSesSendResult call() {
return proxy.sendEmail(to, from, subject, body);
}
});
} |
java | public <T extends Pointer> T getHelperWorkspace(String key){
return helperWorkspacePointers == null ? null : (T)helperWorkspacePointers.get(key);
} |
java | public ListObjectParentsResult withParentLinks(ObjectIdentifierAndLinkNameTuple... parentLinks) {
if (this.parentLinks == null) {
setParentLinks(new java.util.ArrayList<ObjectIdentifierAndLinkNameTuple>(parentLinks.length));
}
for (ObjectIdentifierAndLinkNameTuple ele : parentLinks) {
this.parentLinks.add(ele);
}
return this;
} |
python | def push(self, channel_id, data):
"""Push message with POST ``data`` for ``channel_id``
"""
channel_path = self.channel_path(channel_id)
response = requests.post(channel_path, data)
return response.json() |
python | def merge_pairs(self):
"""
Use bbmerge from the bbmap suite of tools to merge paired-end reads
"""
logging.info('Merging paired reads')
for sample in self.metadata:
# Can only merge paired-end
if len(sample.general.fastqfiles) == 2:
# Set the name of the merged, and unmerged files
sample.general.mergedreads = \
os.path.join(sample.general.outputdirectory, '{}_paired.fastq.gz'.format(sample.name))
sample.general.unmergedforward = \
os.path.join(sample.general.outputdirectory, '{}_unpaired_R1.fastq.gz'.format(sample.name))
sample.general.unmergedreverse = \
os.path.join(sample.general.outputdirectory, '{}_unpaired_R2.fastq.gz'.format(sample.name))
try:
# Run the merging command - forward_in=sample.general.normalisedreads[0],
out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0],
merged_reads=sample.general.mergedreads,
returncmd=True,
outu1=sample.general.unmergedforward,
outu2=sample.general.unmergedreverse,
threads=self.cpus)
sample[self.analysistype].bbmergecmd = cmd
write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
except CalledProcessError:
delattr(sample.general, 'mergedreads')
delattr(sample.general, 'unmergedforward')
delattr(sample.general, 'unmergedreverse')
except IndexError:
delattr(sample.general, 'mergedreads')
delattr(sample.general, 'unmergedforward')
delattr(sample.general, 'unmergedreverse')
else:
sample.general.mergedreads = sorted(sample.general.trimmedcorrectedfastqfiles)[0] |
java | public static CmsXmlPage unmarshal(CmsObject cms, CmsFile file) throws CmsXmlException {
return CmsXmlPageFactory.unmarshal(cms, file, true);
} |
python | def remove_router_from_hosting_device(self, client, hosting_device_id,
router_id):
"""Remove a router from hosting_device."""
res_path = hostingdevice.HostingDevice.resource_path
return client.delete((res_path + DEVICE_L3_ROUTERS + "/%s") % (
hosting_device_id, router_id)) |
python | def get_records(self, name):
"""Return all the records for the given name in the cache.
Args:
name (string): The name which the required models are stored under.
Returns:
list: A list of :class:`cinder_data.model.CinderModel` models.
"""
if name in self._cache:
return self._cache[name].values()
else:
return [] |
java | public Observable<ServiceResponse<Page<FeatureResultInner>>> list1NextWithServiceResponseAsync(final String nextPageLink) {
return list1NextSinglePageAsync(nextPageLink)
.concatMap(new Func1<ServiceResponse<Page<FeatureResultInner>>, Observable<ServiceResponse<Page<FeatureResultInner>>>>() {
@Override
public Observable<ServiceResponse<Page<FeatureResultInner>>> call(ServiceResponse<Page<FeatureResultInner>> page) {
String nextPageLink = page.body().nextPageLink();
if (nextPageLink == null) {
return Observable.just(page);
}
return Observable.just(page).concatWith(list1NextWithServiceResponseAsync(nextPageLink));
}
});
} |
java | public int startNewSession() {
if (txSessEncKey != null)
return SESSION_ERROR_ALREADY_ACTIVE;
if ((txMasterSalt == null) || (rxMasterSalt == null))
return SESSION_ERROR_MASTER_SALT_UDNEFINED;
if ((txMasterKey == null) || (rxMasterKey == null))
return SESSION_ERROR_MASTER_SALT_UDNEFINED;
if (!txSessionKeyDerivation()) {
log("startNewSession txSessionKeyDerivation failed");
return SESSION_ERROR_KEY_DERIVATION_FAILED;
}
// Create encryptor components for tx session
try {
// and the HMAC components
txEncryptorSuite = platform.getCrypto().createEncryptorSuite(
txSessEncKey, initVector);
txHMAC = platform.getCrypto().createHMACSHA1(txSessAuthKey);
} catch (Throwable e) {
log("startNewSession failed to create Tx encryptor");
return SESSION_ERROR_RESOURCE_CREATION_PROBLEM;
}
replayWindow = platform.getUtils().createSortedVector();
receivedFirst = false;
rollOverCounter = 0;
rxRoc = 0;
txIV = new byte[16]; // Always uses a 128 bit IV
rxIV = new byte[16];
txEncOut = new byte[16];
rxEncOut = new byte[16];
return SESSION_OK;
} |
python | def _collect_variables(names, expressions=None):
"""
Map labels and expressions to registered variables.
Handles argument matching.
Example:
_collect_variables(names=['zones', 'zone_id'],
expressions=['parcels.zone_id'])
Would return a dict representing:
{'parcels': <DataFrameWrapper for zones>,
'zone_id': <pandas.Series for parcels.zone_id>}
Parameters
----------
names : list of str
List of registered variable names and/or labels.
If mixing names and labels, labels must come at the end.
expressions : list of str, optional
List of registered variable expressions for labels defined
at end of `names`. Length must match the number of labels.
Returns
-------
variables : dict
Keys match `names`. Values correspond to registered variables,
which may be wrappers or evaluated functions if appropriate.
"""
# Map registered variable labels to expressions.
if not expressions:
expressions = []
offset = len(names) - len(expressions)
labels_map = dict(tz.concatv(
tz.compatibility.zip(names[:offset], names[:offset]),
tz.compatibility.zip(names[offset:], expressions)))
all_variables = tz.merge(_INJECTABLES, _TABLES)
variables = {}
for label, expression in labels_map.items():
# In the future, more registered variable expressions could be
# supported. Currently supports names of registered variables
# and references to table columns.
if '.' in expression:
# Registered variable expression refers to column.
table_name, column_name = expression.split('.')
table = get_table(table_name)
variables[label] = table.get_column(column_name)
else:
thing = all_variables[expression]
if isinstance(thing, (_InjectableFuncWrapper, TableFuncWrapper)):
# Registered variable object is function.
variables[label] = thing()
else:
variables[label] = thing
return variables |
java | public HttpResponse sendRequest(PiwikRequest request) throws IOException{
HttpClient client = getHttpClient();
uriBuilder.replaceQuery(request.getUrlEncodedQueryString());
HttpGet get = new HttpGet(uriBuilder.build());
try {
return client.execute(get);
} finally {
get.releaseConnection();
}
} |
python | def pngout(ext_args):
"""Run the external program pngout on the file."""
args = _PNGOUT_ARGS + [ext_args.old_filename, ext_args.new_filename]
extern.run_ext(args)
return _PNG_FORMAT |
python | def map_agent(self, agent, do_rename):
"""Return the given Agent with its grounding mapped.
This function grounds a single agent. It returns the new Agent object
(which might be a different object if we load a new agent state
from json) or the same object otherwise.
Parameters
----------
agent : :py:class:`indra.statements.Agent`
The Agent to map.
do_rename : bool
If True, the Agent name is updated based on the mapped grounding.
In that case, the priority for setting the name is the FamPlex ID,
then the HGNC symbol, then the gene name from Uniprot.
Returns
-------
grounded_agent : :py:class:`indra.statements.Agent`
The grounded Agent.
maps_to_none : bool
True if the Agent is in the grounding map and maps to None.
"""
agent_text = agent.db_refs.get('TEXT')
mapped_to_agent_json = self.agent_map.get(agent_text)
if mapped_to_agent_json:
mapped_to_agent = \
Agent._from_json(mapped_to_agent_json['agent'])
return mapped_to_agent, False
# Look this string up in the grounding map
# If not in the map, leave agent alone and continue
if agent_text in self.gm.keys():
map_db_refs = self.gm[agent_text]
else:
return agent, False
# If it's in the map but it maps to None, then filter out
# this statement by skipping it
if map_db_refs is None:
# Increase counter if this statement has not already
# been skipped via another agent
logger.debug("Skipping %s" % agent_text)
return None, True
# If it has a value that's not None, map it and add it
else:
# Otherwise, update the agent's db_refs field
self.update_agent_db_refs(agent, agent_text, do_rename)
return agent, False |
java | public void setTMRecoveryService(TMRecoveryService tmrec) {
if (tc.isDebugEnabled())
Tr.debug(tc, "setTMRecoveryService " + tmrec);
if (tmsRef != null) {
if (!_isSQLRecoveryLog) {
if (_cc != null) {
tmsRef.doStartup(this, _isSQLRecoveryLog);
}
} else {
// If the JTMConfigurationProvider has been activated, and if the DataSourceFactory
// has been provided, we can initiate recovery
ServiceReference<ResourceFactory> serviceRef = dataSourceFactoryRef.getReference();
if (tc.isDebugEnabled())
Tr.debug(tc, "retrieved datasourceFactory service ref " + serviceRef);
if (_cc != null && serviceRef != null) {
tmsRef.doStartup(this, _isSQLRecoveryLog);
}
}
} else if (tc.isDebugEnabled())
Tr.debug(tc, "tmsref is null");
} |
java | protected final void flushBuffer(boolean closeConn) throws IOException {
if (!flushed && closeConn) {
response.setHeader("connection", "close");
// if(showVersion)response.setHeader(Constants.NAME+"-Version", version);
}
initOut();
byte[] barr = _toString(true).getBytes(ReqRspUtil.getCharacterEncoding(null, response));
if (cacheItem != null && cacheItem.isValid()) {
cacheItem.store(barr, flushed);
// writeCache(barr,flushed);
}
flushed = true;
out.write(barr);
buffer = new StringBuilder(BUFFER_SIZE); // to not change to clearBuffer, produce problem with CFMLWriterWhiteSpace.clearBuffer
} |
python | def display_path_as_ul(category, using='categories.Category'):
"""
Render the category with ancestors, but no children using the
``categories/ul_tree.html`` template.
Example::
{% display_path_as_ul "/Grandparent/Parent" %}
or ::
{% display_path_as_ul category_obj %}
Returns::
<ul>
<li><a href="/categories/">Top</a>
<ul>
<li><a href="/categories/grandparent/">Grandparent</a></li>
</ul>
</li>
</ul>
"""
if isinstance(category, CategoryBase):
cat = category
else:
cat = get_category(category)
return {'category': cat, 'path': cat.get_ancestors() or []} |
java | public MusicElement getElementByReference(MusicElementReference ref) {
if (voiceExists(ref.getVoice())) {
for (Object o : getVoice(ref.getVoice())) {
MusicElement element = (MusicElement) o;
if (element.getReference().equals(ref))
return element;
}
}
return null;
} |
java | public BoxCollaboration.Info collaborate(String email, BoxCollaboration.Role role,
Boolean notify, Boolean canViewPath) {
JsonObject accessibleByField = new JsonObject();
accessibleByField.add("login", email);
accessibleByField.add("type", "user");
return this.collaborate(accessibleByField, role, notify, canViewPath);
} |
python | def load_vocabulary(lang="en", type="wiki"):
"""Return a CountedVocabulary object.
Args:
lang (string): language code.
type (string): wiki,...
"""
src_dir = "{}_vocab".format(type)
p = locate_resource(src_dir, lang)
return CountedVocabulary.from_vocabfile(p) |
python | def translate(
nucleotide_sequence,
first_codon_is_start=True,
to_stop=True,
truncate=False):
"""Translates cDNA coding sequence into amino acid protein sequence.
Should typically start with a start codon but allowing non-methionine
first residues since the CDS we're translating might have been affected
by a start loss mutation.
The sequence may include the 3' UTR but will stop translation at the first
encountered stop codon.
Parameters
----------
nucleotide_sequence : BioPython Seq
cDNA sequence
first_codon_is_start : bool
Treat the first codon of nucleotide_sequence as a start codon (translated as methionine)
to_stop : bool
Stop translation at the first encountered stop codon (default = True)
truncate : bool
Truncate sequence if it's not a multiple of 3 (default = False)
Returns BioPython Seq of amino acids
"""
if not isinstance(nucleotide_sequence, Seq):
nucleotide_sequence = Seq(nucleotide_sequence)
if truncate:
# if sequence isn't a multiple of 3, truncate it so BioPython
# doesn't complain
n_nucleotides = int(len(nucleotide_sequence) / 3) * 3
nucleotide_sequence = nucleotide_sequence[:n_nucleotides]
else:
n_nucleotides = len(nucleotide_sequence)
assert n_nucleotides % 3 == 0, \
("Expected nucleotide sequence to be multiple of 3"
" but got %s of length %d") % (
nucleotide_sequence,
n_nucleotides)
# passing cds=False to translate since we may want to deal with premature
# stop codons
protein_sequence = nucleotide_sequence.translate(to_stop=to_stop, cds=False)
if first_codon_is_start and (
len(protein_sequence) == 0 or protein_sequence[0] != "M"):
if nucleotide_sequence[:3] in START_CODONS:
# TODO: figure out when these should be made into methionines
# and when left as whatever amino acid they normally code for
# e.g. Leucine start codons
# See: DOI: 10.1371/journal.pbio.0020397
return "M" + protein_sequence[1:]
else:
raise ValueError(
("Expected first codon of %s to be start codon"
" (one of %s) but got %s") % (
protein_sequence[:10],
START_CODONS,
nucleotide_sequence))
return protein_sequence |
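A brief usage sketch of `translate` above, assuming BioPython's `Seq` and the module-level `START_CODONS` the function references; the cDNA here is made up for illustration:

```python
from Bio.Seq import Seq

# ATG GCC TAA -> Met, Ala, stop; to_stop=True halts translation at the stop codon
protein = translate(Seq("ATGGCCTAA"), first_codon_is_start=True, to_stop=True)
print(protein)  # MA
```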
java | public static void initialiseAcceptListenerFactory(AcceptListenerFactory _acceptListenerFactory)
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(tc, "initialiseAcceptListenerFactory", _acceptListenerFactory);
Class clientImpl = instance.getClass();
Method initialiseAcceptListenerFactoryMethod;
try
{
initialiseAcceptListenerFactoryMethod = clientImpl.getMethod("initialiseAcceptListenerFactory", new Class[] { AcceptListenerFactory.class });
initialiseAcceptListenerFactoryMethod.invoke(clientImpl, new Object[] { _acceptListenerFactory });
} catch (Exception e)
{
FFDCFilter.processException(e, "com.ibm.ws.sib.jfapchannel.ServerConnectionManager.initialiseAcceptListenerFactory", JFapChannelConstants.SRVRCONNMGR_INITIALISE_ALF_01);
//Make sure we allow for the fact this could be an InvocationTargetException
Throwable displayedException = e;
if (e instanceof InvocationTargetException)
displayedException = e.getCause();
SibTr.error(tc, "EXCP_DURING_INIT_SICJ0081", new Object[] { clientImpl, displayedException });
if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled())
SibTr.exception(tc, e);
}
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(tc, "initialiseAcceptListenerFactory");
} |
java | private static UriComponentsBuilder getBuilder(@Nullable ServerWebExchange exchange) {
return exchange == null //
? UriComponentsBuilder.fromPath("/") //
: UriComponentsBuilder.fromHttpRequest(exchange.getRequest());
} |
java | private static ChannelBuffer createDataTerminatingChannelBuffer(byte[] data) {
int length = data.length;
if (length < 1) {
return ChannelBuffers.wrappedBuffer(CRLF_DOT_CRLF);
} else {
byte[] terminating;
byte last = data[length -1];
if (length == 1) {
if (last == CR) {
terminating = LF_DOT_CRLF;
} else {
terminating = CRLF_DOT_CRLF;
}
} else {
byte prevLast = data[length - 2];
if (last == LF) {
if (prevLast == CR) {
terminating = DOT_CRLF;
} else {
terminating = CRLF_DOT_CRLF;
}
} else if (last == CR) {
terminating = LF_DOT_CRLF;
} else {
terminating = CRLF_DOT_CRLF;
}
}
return ChannelBuffers.wrappedBuffer(data, terminating);
}
} |
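The branching above picks an SMTP DATA terminator so the stream ends with exactly one `CRLF . CRLF`, reusing a trailing CR or CRLF the payload already has. A hedged sketch of the same decision logic in plain Python (illustrative only, not the Netty API):

```python
def terminator(data: bytes) -> bytes:
    """Suffix that makes data end with CRLF '.' CRLF exactly once."""
    if data.endswith(b'\r\n'):
        return b'.\r\n'          # payload already ends a line
    if data.endswith(b'\r'):
        return b'\n.\r\n'        # complete the dangling CR
    return b'\r\n.\r\n'          # covers empty payloads and all other endings

assert b'hi\r\n' + terminator(b'hi\r\n') == b'hi\r\n.\r\n'
assert b'hi' + terminator(b'hi') == b'hi\r\n.\r\n'
```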
python | def _set_route_parameter(self, v, load=False):
"""
Setter method for route_parameter, mapped from YANG variable /hardware/profile/route/predefined/route_parameter (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_parameter is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_parameter() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=route_parameter.route_parameter, is_container='container', presence=False, yang_name="route_parameter", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'display-when': u"(../route_profiletype = 'route-enhance')"}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """route_parameter must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=route_parameter.route_parameter, is_container='container', presence=False, yang_name="route_parameter", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'display-when': u"(../route_profiletype = 'route-enhance')"}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)""",
})
self.__route_parameter = t
if hasattr(self, '_set'):
self._set() |
python | def decrypt_file(self, filename, always_trust=False, passphrase=None,
output=None):
"""Decrypt the contents of a file-like object ``filename`` .
:param str filename: A file-like object to decrypt.
:param bool always_trust: Instruct GnuPG to ignore trust checks.
:param str passphrase: The passphrase for the secret key used for decryption.
:param str output: A filename to write the decrypted output to.
"""
args = ["--decrypt"]
if output: # write the output to a file with the specified name
if os.path.exists(output):
os.remove(output) # to avoid overwrite confirmation message
args.append('--output %s' % output)
if always_trust:
args.append("--always-trust")
result = self._result_map['crypt'](self)
self._handle_io(args, filename, result, passphrase, binary=True)
log.debug('decrypt result: %r', result.data)
return result |
java | public void execute(@Param("pipelineId") Long pipelineId, Context context, Navigator nav) throws Exception {
Channel channel = channelService.findByPipelineId(pipelineId);
if (channel.getStatus().isStart()) {
nav.redirectTo(WebConstant.ERROR_FORBIDDEN_Link);
return;
}
Pipeline pipeline = pipelineService.findById(pipelineId);
context.put("pipeline", pipeline);
context.put("nodes", nodeService.listAll());
} |
java | protected List<AccessControlEntry> readACLPermisions(String cid) throws SQLException, IllegalACLException
{
List<AccessControlEntry> naPermissions = new ArrayList<AccessControlEntry>();
ResultSet exoPerm = findPropertyByName(cid, Constants.EXO_PERMISSIONS.getAsString());
try
{
if (exoPerm.next())
{
do
{
naPermissions.add(AccessControlEntry.parse(new String(exoPerm.getBytes(COLUMN_VDATA))));
}
while (exoPerm.next());
return naPermissions;
}
else
{
throw new IllegalACLException("Property exo:permissions is not found for node with id: "
+ getIdentifier(cid));
}
}
finally
{
try
{
exoPerm.close();
}
catch (SQLException e)
{
LOG.error("Can't close the ResultSet: " + e.getMessage());
}
}
} |
python | def netconf_state_statistics_dropped_sessions(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_state = ET.SubElement(config, "netconf-state", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
statistics = ET.SubElement(netconf_state, "statistics")
dropped_sessions = ET.SubElement(statistics, "dropped-sessions")
dropped_sessions.text = kwargs.pop('dropped_sessions')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
python | def _ppf(self, uloc, left, right, cache):
"""
Point percentile function.
Example:
>>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9]))
[0.1 0.2 0.9]
>>> print(chaospy.Add(chaospy.Uniform(), 2).inv([0.1, 0.2, 0.9]))
[2.1 2.2 2.9]
>>> print(chaospy.Add(2, chaospy.Uniform()).inv([0.1, 0.2, 0.9]))
[2.1 2.2 2.9]
>>> print(chaospy.Add(1, 1).inv([0.1, 0.2, 0.9]))
[2. 2. 2.]
"""
left = evaluation.get_inverse_cache(left, cache)
right = evaluation.get_inverse_cache(right, cache)
if isinstance(left, Dist):
if isinstance(right, Dist):
raise evaluation.DependencyError(
"under-defined distribution {} or {}".format(left, right))
elif not isinstance(right, Dist):
return left+right
else:
left, right = right, left
xloc = evaluation.evaluate_inverse(left, uloc, cache=cache)
output = (xloc.T + numpy.asfarray(right).T).T
return output |
python | def add_to_results(self, data, label, results):
"""
Adds the label to the results, as needed, then appends the data
to the running results list
"""
for datum in data:
if label is not None:
datum.update({'type': label})
results.append(datum)
return results |
java | public T removeAcl(List<AclEntry> entries) {
for (AclEntry entry : entries) {
if (entry.isDefault()) {
AccessControlList defaultAcl = getDefaultACL();
defaultAcl.removeEntry(entry);
} else {
mAcl.removeEntry(entry);
}
}
updateMask(entries);
return getThis();
} |
java | public void setAdditionalArtifacts(java.util.Collection<String> additionalArtifacts) {
if (additionalArtifacts == null) {
this.additionalArtifacts = null;
return;
}
this.additionalArtifacts = new java.util.ArrayList<String>(additionalArtifacts);
} |
java | @NotNull
public DoubleStream doubles() {
return DoubleStream.generate(new DoubleSupplier() {
@Override
public double getAsDouble() {
return random.nextDouble();
}
});
} |
python | def new_calendar(self, calendar_name):
""" Creates a new calendar
:param str calendar_name: name of the new calendar
:return: a new Calendar instance
:rtype: Calendar
"""
if not calendar_name:
return None
url = self.build_url(self._endpoints.get('root_calendars'))
response = self.con.post(url, data={self._cc('name'): calendar_name})
if not response:
return None
data = response.json()
# Everything received from cloud must be passed as self._cloud_data_key
return self.calendar_constructor(parent=self,
**{self._cloud_data_key: data}) |
python | def list_namespaced_pod_template(self, namespace, **kwargs):
"""
list or watch objects of kind PodTemplate
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_pod_template(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1PodTemplateList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_pod_template_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_pod_template_with_http_info(namespace, **kwargs)
return data |
python | def electric_field_amplitude_intensity(s0, Isat=16.6889462814,
Omega=1e6, units="ad-hoc"):
"""Return the amplitude of the electric field for saturation parameter.
This is at a given saturation parameter s0=I/Isat, where I0 is by default \
Isat=16.6889462814 m/m^2 is the saturation intensity of the D2 line of \
rubidium for circularly polarized light. Optionally, a frequency scale \
`Omega` can be provided.
>>> print(electric_field_amplitude_intensity(1.0, units="ad-hoc"))
9.0152984553
>>> print(electric_field_amplitude_intensity(1.0, Omega=1.0, units="SI"))
112.135917207
>>> print(electric_field_amplitude_intensity(1.0, units="SI"))
0.000112135917207
"""
E0_sat = sqrt(2*mu0*c*Isat)/Omega
if units == "ad-hoc":
e0 = hbar/(e*a0) # This is the electric field scale.
E0_sat = E0_sat/e0
return E0_sat*sqrt(s0) |
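For context, the `sqrt(2*mu0*c*Isat)` factor above is the standard plane-wave relation between intensity and field amplitude (this derivation is supplied here for clarity, not taken from the snippet):

$$
I = \tfrac{1}{2}\,\varepsilon_0 c\, E_0^2
\quad\Longrightarrow\quad
E_0 = \sqrt{\frac{2I}{\varepsilon_0 c}} = \sqrt{2\mu_0 c\, I},
$$

using $1/(\varepsilon_0 c) = \mu_0 c$ from $c^2 = 1/(\mu_0 \varepsilon_0)$. With $I = s_0 I_{sat}$, the returned value is $\sqrt{2\mu_0 c\, I_{sat}}\,\sqrt{s_0}$, rescaled by `Omega` and, in ad-hoc units, by the field scale $\hbar/(e a_0)$.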
java | public INDArray reconstructionLogProbability(INDArray data, int numSamples) {
if (numSamples <= 0) {
throw new IllegalArgumentException(
"Invalid input: numSamples must be > 0. Got: " + numSamples + " " + layerId());
}
if (reconstructionDistribution instanceof LossFunctionWrapper) {
throw new UnsupportedOperationException("Cannot calculate reconstruction log probability when using "
+ "a LossFunction (via LossFunctionWrapper) instead of a ReconstructionDistribution: ILossFunction "
+ "instances are not in general probabilistic, hence it is not possible to calculate reconstruction probability "
+ layerId());
}
data = data.castTo(dataType);
//Forward pass through the encoder and mean for P(Z|X)
LayerWorkspaceMgr workspaceMgr = LayerWorkspaceMgr.noWorkspaces(); //TODO add workspace support to this method
setInput(data, workspaceMgr);
VAEFwdHelper fwd = doForward(true, true, workspaceMgr);
IActivation afn = layerConf().getActivationFn();
//Forward pass through logStd^2 for P(Z|X)
INDArray pzxLogStd2W = getParamWithNoise(VariationalAutoencoderParamInitializer.PZX_LOGSTD2_W, false, workspaceMgr);
INDArray pzxLogStd2b = getParamWithNoise(VariationalAutoencoderParamInitializer.PZX_LOGSTD2_B, false, workspaceMgr);
INDArray meanZ = fwd.pzxMeanPreOut;
INDArray logStdev2Z = fwd.encoderActivations[fwd.encoderActivations.length - 1].mmul(pzxLogStd2W)
.addiRowVector(pzxLogStd2b);
pzxActivationFn.getActivation(meanZ, false);
pzxActivationFn.getActivation(logStdev2Z, false);
INDArray pzxSigma = Transforms.exp(logStdev2Z, false);
Transforms.sqrt(pzxSigma, false);
val minibatch = input.size(0);
val size = fwd.pzxMeanPreOut.size(1);
INDArray pxzw = getParamWithNoise(VariationalAutoencoderParamInitializer.PXZ_W, false, workspaceMgr);
INDArray pxzb = getParamWithNoise(VariationalAutoencoderParamInitializer.PXZ_B, false, workspaceMgr);
INDArray[] decoderWeights = new INDArray[decoderLayerSizes.length];
INDArray[] decoderBiases = new INDArray[decoderLayerSizes.length];
for (int i = 0; i < decoderLayerSizes.length; i++) {
String wKey = "d" + i + WEIGHT_KEY_SUFFIX;
String bKey = "d" + i + BIAS_KEY_SUFFIX;
decoderWeights[i] = getParamWithNoise(wKey, false, workspaceMgr);
decoderBiases[i] = getParamWithNoise(bKey, false, workspaceMgr);
}
INDArray sumReconstructionNegLogProbability = null;
for (int i = 0; i < numSamples; i++) {
INDArray e = Nd4j.randn(dataType, minibatch, size);
INDArray z = e.muli(pzxSigma).addi(meanZ); //z = mu + sigma * e, with e ~ N(0,1)
//Do forward pass through decoder
int nDecoderLayers = decoderLayerSizes.length;
INDArray currentActivations = z;
for (int j = 0; j < nDecoderLayers; j++) {
currentActivations = currentActivations.mmul(decoderWeights[j]).addiRowVector(decoderBiases[j]);
afn.getActivation(currentActivations, false);
}
//And calculate reconstruction distribution preOut
INDArray pxzDistributionPreOut = currentActivations.mmul(pxzw).addiRowVector(pxzb);
if (i == 0) {
sumReconstructionNegLogProbability =
reconstructionDistribution.exampleNegLogProbability(data, pxzDistributionPreOut);
} else {
sumReconstructionNegLogProbability
.addi(reconstructionDistribution.exampleNegLogProbability(data, pxzDistributionPreOut));
}
}
setInput(null, workspaceMgr);
return sumReconstructionNegLogProbability.divi(-numSamples);
} |
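The sampling step in the loop above is the standard reparameterization trick, `z = mu + sigma * e` with `e ~ N(0, 1)`, and the per-example negative log probabilities are averaged over `numSamples` Monte Carlo draws. A minimal NumPy sketch of that estimator; the unit-variance Gaussian decoder likelihood is an assumption for illustration, since the layer supports other reconstruction distributions:

```python
import numpy as np

def reconstruction_log_prob(x, mean_z, sigma_z, decode, num_samples=16):
    """Monte Carlo estimate of the reconstruction log probability per example."""
    total = np.zeros(x.shape[0])
    for _ in range(num_samples):
        e = np.random.randn(*mean_z.shape)
        z = mean_z + sigma_z * e               # z = mu + sigma * e, e ~ N(0,1)
        x_hat = decode(z)                      # decoder forward pass
        # assumed unit-variance Gaussian likelihood, up to an additive constant
        total += -0.5 * np.sum((x - x_hat) ** 2, axis=1)
    return total / num_samples

# toy usage with a stub decoder
x = np.random.randn(4, 8)
mu, sigma = np.random.randn(4, 2), np.ones((4, 2))
print(reconstruction_log_prob(x, mu, sigma, decode=lambda z: np.zeros((len(z), 8))))
```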
python | def record_message(self, msg, from_rewarder):
"""Record a message to our rewards.demo file if it is has been opened"""
if self.file:
# Include an authoritative timestamp (because the `sent_at` from the server is likely to be different)
timestamped_message = {
'timestamp': time.time(),
'message': json.loads(msg),
'from_rewarder': from_rewarder,
}
self.file.write(json.dumps(timestamped_message))
self.file.write('\n')
self.file.flush() |
python | def partial_derivative_scalar(self, U, V, y=0):
"""Compute partial derivative :math:`C(u|v)` of cumulative density of single values."""
self.check_fit()
X = np.column_stack((U, V))
return self.partial_derivative(X, y) |
python | def register_hit_type(self, title, description, reward, duration,
keywords=None, approval_delay=None, qual_req=None):
"""
Register a new HIT Type
title, description are strings
reward is a Price object
duration can be a timedelta, or an object castable to an int
"""
params = dict(
Title=title,
Description=description,
AssignmentDurationInSeconds=
self.duration_as_seconds(duration),
)
params.update(MTurkConnection.get_price_as_price(reward).get_as_params('Reward'))
if keywords:
params['Keywords'] = self.get_keywords_as_string(keywords)
if approval_delay is not None:
d = self.duration_as_seconds(approval_delay)
params['AutoApprovalDelayInSeconds'] = d
if qual_req is not None:
params.update(qual_req.get_as_params())
return self._process_request('RegisterHITType', params) |
java | protected void displayValues(CmsLocationValue value) {
m_addressField.setTextValue(value.getAddress());
m_latitudeField.setFormValueAsString(value.getLatitudeString());
m_longitudeField.setFormValueAsString(value.getLongitudeString());
m_heightField.setFormValueAsString("" + value.getHeight());
m_widthField.setFormValueAsString("" + value.getWidth());
m_zoomField.setFormValueAsString("" + value.getZoom());
m_modeField.setFormValueAsString(value.getMode());
m_typeField.setFormValueAsString(value.getType());
} |
python | def has_gis(wrapped, instance, args, kwargs):
"""Skip function execution if there are no presamples"""
if gis:
return wrapped(*args, **kwargs)
else:
warn(MISSING_GIS) |
python | def energy_upperbound(self, spins):
"""A formula for an upper bound on the energy of Theta with spins fixed.
Args:
spins (dict): Spin values for a subset of the variables in Theta.
Returns:
Formula that upper bounds the energy with spins fixed.
"""
subtheta = self.theta.copy()
subtheta.fix_variables(spins)
# ok, let's start eliminating variables
trees = self._trees
if not trees:
# if there are no variables to eliminate, then the offset of
# subtheta is the exact value and we can just return it
assert not subtheta.linear and not subtheta.quadratic
return subtheta.offset
energy = Plus(self.message_upperbound(trees, {}, subtheta), subtheta.offset)
return energy |
python | def createfastq(self):
"""Uses bcl2fastq to create .fastq files from a MiSeqRun"""
# Initialise samplecount
samplecount = 0
# If the fastq destination folder is not provided, make the default value of :path/:miseqfoldername
self.fastqdestination = self.fastqdestination if self.fastqdestination else self.path + self.miseqfoldername
# Make the path
make_path(self.fastqdestination)
# Initialise variables for storing index information
index = ''
indexlength = int()
# bcl2fastq requires an older version of the sample sheet, this recreates the required version
# Create the new sample sheet
with open('{}/SampleSheet_modified.csv'.format(self.fastqdestination), "w") as modifiedsamplesheet:
# Write the required headings to the file
modifiedsamplesheet.write(
"FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,Operator,SampleProject\n")
for strain in self.samples:
# Create a combined index of index1-index2
try:
strain.run.modifiedindex = '{}-{}'.format(strain.run.index, strain.run.index2)
indexlength = 16
index = 'I8,I8'
except KeyError:
strain.run.modifiedindex = strain.run.index
indexlength = 6
index = 'I6'
# The list of items to print to each line of the modified sample sheet
printlist = [self.flowcell, '1', strain.name, str(strain.run.SampleNumber), strain.run.modifiedindex,
strain.run.Description, 'N', 'NA',
strain.run.InvestigatorName, self.projectname]
modifiedsamplesheet.write('{}\n'.format(",".join(printlist)))
samplecount += 1
# Set :forward/reverse length to :header.forward/reverse length if the argument is not provided, or it's 'full',
# otherwise use the supplied argument
self.forwardlength = self.metadata.header.forwardlength if self.forwardlength.lower()\
== 'full' else self.forwardlength
# Set :reverselength to :header.reverselength
self.reverselength = self.metadata.header.reverselength if self.reverselength.lower() \
== 'full' else self.reverselength
# The number of cycles required is the number of forward reads + the index (8) + the second index (8)
# Also set the basemask variable as required
if self.reverselength != '0':
self.readsneeded = int(self.forwardlength) + int(self.reverselength) + indexlength
basemask = "Y{}n*,{},Y{}n*".format(self.forwardlength, index, self.reverselength)
nohup = "nohup make -j 16 > nohup.out"
else:
# + 1
self.readsneeded = int(self.forwardlength) + indexlength
basemask = "Y{}n*,{},n*".format(self.forwardlength, index)
nohup = "nohup make -j 16 r1 > nohup.out"
# Handle plurality appropriately
samples = 'samples' if samplecount > 1 else 'sample'
number = 'are' if samplecount > 1 else 'is'
printtime('There {} {} {} in this run. '
'Running fastq creating module with the following parameters:\n'
'MiSeqPath: {},\n'
'MiSeqFolder: {},\n'
'Fastq destination: {},\n'
'SampleSheet: {}'
.format(number, samplecount, samples, self.miseqpath, self.miseqfolder,
self.fastqdestination, '{}/SampleSheet_modified.csv'.format(self.fastqdestination)),
self.start)
# Count the number of completed cycles in the run of interest
cycles = glob('{}Data/Intensities/BaseCalls/L001/C*'.format(self.miseqfolder))
while len(cycles) < self.readsneeded:
printtime('Currently at {} cycles. Waiting until the MiSeq reaches cycle {}'.format(len(cycles),
self.readsneeded), self.start)
sleep(1800)
cycles = glob('{}Data/Intensities/BaseCalls/L001/C*'.format(self.miseqfolder))
# configureBClToFastq requires :self.miseqfolder//Data/Intensities/BaseCalls/config.xml in order to work
# When you download runs from BaseSpace, this file is not provided. There is an empty config.xml file that
# can be populated with run-specific values and moved to the appropriate folder
if not os.path.isfile('{}Data/Intensities/BaseCalls/config.xml'.format(self.miseqfolder)):
self.configfilepopulator()
# Define the bcl2fastq system call
bclcall = "configureBclToFastq.pl --input-dir {}Data/Intensities/BaseCalls " \
"--output-dir {} --force --sample-sheet {}/SampleSheet_modified.csv " \
"--mismatches 1 --no-eamss --fastq-cluster-count 0 --compression none --use-bases-mask {}"\
.format(self.miseqfolder, self.fastqdestination, self.fastqdestination, basemask)
# Define the nohup system call
nohupcall = "cd {} && {}".format(self.fastqdestination, nohup)
# fnull = open(os.devnull, 'wb')
if not os.path.isdir("{}/Project_{}".format(self.fastqdestination, self.projectname)):
# Call configureBclToFastq.pl
printtime('Running bcl2fastq', self.start)
# Run the commands
threadlock = threading.Lock()
outstr = ''
outerr = ''
out, err = run_subprocess(bclcall)
outstr += out
outerr += err
out, err = run_subprocess(nohupcall)
outstr += out
outerr += err
# call(bclcall, shell=True, stdout=fnull, stderr=fnull)
# call(nohupcall, shell=True, stdout=fnull, stderr=fnull)
threadlock.acquire()
write_to_logfile(bclcall, bclcall, self.logfile)
write_to_logfile(nohupcall, nohupcall, self.logfile)
write_to_logfile(outstr, outerr, self.logfile)
threadlock.release()
# Populate the metadata
for sample in self.metadata.samples:
sample.commands = GenObject()
sample.commands.nohup = nohupcall
sample.commands.bcl = bclcall
sample.run.forwardlength = self.forwardlength
sample.run.reverselength = self.reverselength
# Copy the fastq files to a central folder so they can be processed
self.fastqmover() |
java | private DocumentReferenceTranslator getDocRefTranslator(Object value) {
if (value instanceof org.apache.xmlbeans.XmlObject)
return (DocumentReferenceTranslator) VariableTranslator.getTranslator(org.apache.xmlbeans.XmlObject.class.getName());
else if (value instanceof org.w3c.dom.Document)
return (DocumentReferenceTranslator) VariableTranslator.getTranslator(org.w3c.dom.Document.class.getName());
else if (value instanceof com.centurylink.mdw.xml.XmlBeanWrapper)
return (DocumentReferenceTranslator) VariableTranslator.getTranslator(com.centurylink.mdw.xml.XmlBeanWrapper.class.getName());
else if (value instanceof groovy.util.Node)
return (DocumentReferenceTranslator) VariableTranslator.getTranslator(groovy.util.Node.class.getName());
else
return null;
} |
java | public static double exponentialCdf(double x, double lamda) {
if(x<0 || lamda<=0) {
throw new IllegalArgumentException("All the parameters must be positive.");
}
double probability = 1.0 - Math.exp(-lamda*x);
return probability;
} |
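The return value above is the closed-form exponential CDF, $F(x;\lambda) = 1 - e^{-\lambda x}$. A quick cross-check against SciPy, which parameterizes the distribution by the scale $1/\lambda$ (the numbers here are just an example):

```python
import math
from scipy import stats

lamda, x = 0.5, 3.0
manual = 1.0 - math.exp(-lamda * x)
assert abs(manual - stats.expon.cdf(x, scale=1.0 / lamda)) < 1e-12
print(manual)  # 0.7768698398515702
```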
python | def _to_graphviz(tree_info, show_info, feature_names, precision=None, **kwargs):
"""Convert specified tree to graphviz instance.
See:
- https://graphviz.readthedocs.io/en/stable/api.html#digraph
"""
if GRAPHVIZ_INSTALLED:
from graphviz import Digraph
else:
raise ImportError('You must install graphviz to plot tree.')
def add(root, parent=None, decision=None):
"""Recursively add node or edge."""
if 'split_index' in root: # non-leaf
name = 'split{0}'.format(root['split_index'])
if feature_names is not None:
label = 'split_feature_name: {0}'.format(feature_names[root['split_feature']])
else:
label = 'split_feature_index: {0}'.format(root['split_feature'])
label += r'\nthreshold: {0}'.format(_float2str(root['threshold'], precision))
for info in show_info:
if info in {'split_gain', 'internal_value'}:
label += r'\n{0}: {1}'.format(info, _float2str(root[info], precision))
elif info == 'internal_count':
label += r'\n{0}: {1}'.format(info, root[info])
graph.node(name, label=label)
if root['decision_type'] == '<=':
l_dec, r_dec = '<=', '>'
elif root['decision_type'] == '==':
l_dec, r_dec = 'is', "isn't"
else:
raise ValueError('Invalid decision type in tree model.')
add(root['left_child'], name, l_dec)
add(root['right_child'], name, r_dec)
else: # leaf
name = 'leaf{0}'.format(root['leaf_index'])
label = 'leaf_index: {0}'.format(root['leaf_index'])
label += r'\nleaf_value: {0}'.format(_float2str(root['leaf_value'], precision))
if 'leaf_count' in show_info:
label += r'\nleaf_count: {0}'.format(root['leaf_count'])
graph.node(name, label=label)
if parent is not None:
graph.edge(parent, name, decision)
graph = Digraph(**kwargs)
add(tree_info['tree_structure'])
return graph |
java | public static <V> V sqlExecute(final SqlVarArgsFunction<V> functional, final Object... args)
{
return new SqlClosure<V>() {
@Override
public V execute(Connection connection, Object... params) throws SQLException
{
return functional.execute(connection, params);
}
}.executeWith(args);
} |
java | public boolean handleCharacter(int ch) {
switch(this.state) {
case SETTING_NORMAL:
this.fontName += (char)ch;
break;
case SETTING_ALTERNATE:
this.falt += (char)ch;
break;
case SETTING_PANOSE:
this.panose += (char)ch;
break;
case SETTING_FONT_EMBED:
break;
case SETTING_FONT_FILE:
break;
case SETTING_FONTNAME:
break;
}
return true;
} |
python | def preview(request):
""" Render preview page.
:returns: A rendered preview
"""
if settings.MARKDOWN_PROTECT_PREVIEW:
user = getattr(request, 'user', None)
if not user or not user.is_staff:
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.get_full_path())
return render(
request, settings.MARKDOWN_PREVIEW_TEMPLATE, dict(
content=request.POST.get('data', 'No content posted'),
css=settings.MARKDOWN_STYLE
)) |
java | @Benchmark
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
public byte[] messageEncodePlain() {
return Status.MESSAGE_KEY.toBytes("Unexpected RST in stream");
} |
java | @XmlElementDecl(namespace = "urn:oasis:names:tc:xacml:2.0:policy:schema:os", name = "PolicySetCombinerParameters", scope = PolicySetType.class)
public JAXBElement<PolicySetCombinerParametersType> createPolicySetTypePolicySetCombinerParameters(PolicySetCombinerParametersType value) {
return new JAXBElement<PolicySetCombinerParametersType>(_PolicySetCombinerParameters_QNAME, PolicySetCombinerParametersType.class, PolicySetType.class, value);
} |
java | void afterRead(Node<K, V> node, long now, boolean recordHit) {
if (recordHit) {
statsCounter().recordHits(1);
}
boolean delayable = skipReadBuffer() || (readBuffer.offer(node) != Buffer.FULL);
if (shouldDrainBuffers(delayable)) {
scheduleDrainBuffers();
}
refreshIfNeeded(node, now);
} |
python | def _get_schema_from_list(frum, table_name, parent, nested_path, columns):
"""
:param frum: The list
:param table_name: Name of the table this list holds records for
:param parent: parent path
:param nested_path: each nested array, in reverse order
:param columns: map from full name to column definition
:return:
"""
for d in frum:
row_type = python_type_to_json_type[d.__class__]
if row_type != "object":
# EXPECTING PRIMITIVE VALUE
full_name = parent
column = columns[full_name]
if not column:
column = Column(
name=concat_field(table_name, full_name),
es_column=full_name,
es_index=".",
es_type=d.__class__.__name__,
jx_type=None, # WILL BE SET BELOW
last_updated=Date.now(),
nested_path=nested_path,
)
columns.add(column)
column.es_type = _merge_python_type(column.es_type, d.__class__)
column.jx_type = python_type_to_json_type[column.es_type]
else:
for name, value in d.items():
full_name = concat_field(parent, name)
column = columns[full_name]
if not column:
column = Column(
name=concat_field(table_name, full_name),
es_column=full_name,
es_index=".",
es_type=value.__class__.__name__,
jx_type=None, # WILL BE SET BELOW
last_updated=Date.now(),
nested_path=nested_path,
)
columns.add(column)
if is_container(value): # GET TYPE OF MULTIVALUE
v = list(value)
if len(v) == 0:
this_type = none_type.__name__
elif len(v) == 1:
this_type = v[0].__class__.__name__
else:
this_type = reduce(
_merge_python_type, (vi.__class__.__name__ for vi in value)
)
else:
this_type = value.__class__.__name__
column.es_type = _merge_python_type(column.es_type, this_type)
column.jx_type = python_type_to_json_type[column.es_type]
if this_type in {"object", "dict", "Mapping", "Data"}:
_get_schema_from_list(
[value], table_name, full_name, nested_path, columns
)
elif this_type in {"list", "FlatList"}:
np = listwrap(nested_path)
newpath = unwraplist([join_field(split_field(np[0]) + [name])] + np)
_get_schema_from_list(
value, table_name, full_name, newpath, columns
) |
java | private Plugin probeNextPlugin() {
Plugin plugin = null;
int i = 0;
while (i < listPending.size()) {
// ZAP: Removed unnecessary cast.
plugin = listPending.get(i);
if (isAllDependencyCompleted(plugin)) {
return plugin;
}
i++;
}
return null;
} |
java | private SubheaderPointer processSubheaderPointers(long subheaderPointerOffset, int subheaderPointerIndex)
throws IOException {
int intOrLongLength = sasFileProperties.isU64() ? BYTES_IN_LONG : BYTES_IN_INT;
int subheaderPointerLength = sasFileProperties.isU64() ? SUBHEADER_POINTER_LENGTH_X64
: SUBHEADER_POINTER_LENGTH_X86;
long totalOffset = subheaderPointerOffset + subheaderPointerLength * ((long) subheaderPointerIndex);
Long[] offset = {totalOffset, totalOffset + intOrLongLength, totalOffset + 2L * intOrLongLength,
totalOffset + 2L * intOrLongLength + 1};
Integer[] length = {intOrLongLength, intOrLongLength, 1, 1};
List<byte[]> vars = getBytesFromFile(offset, length);
long subheaderOffset = bytesToLong(vars.get(0));
long subheaderLength = bytesToLong(vars.get(1));
byte subheaderCompression = vars.get(2)[0];
byte subheaderType = vars.get(3)[0];
return new SubheaderPointer(subheaderOffset, subheaderLength, subheaderCompression, subheaderType);
} |
python | def find_key_by_email(self, email, secret=False):
"""Find user's key based on their email address.
:param str email: The email address to search for.
:param bool secret: If True, search through secret keyring.
"""
for key in self.list_keys(secret=secret):
for uid in key['uids']:
if re.search(email, uid):
return key
raise LookupError("GnuPG public key for email %s not found!" % email) |
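Note that `re.search(email, uid)` above treats the email address as a regular expression, so an unescaped `.` in the address matches any character. A caller wanting a strictly literal match could escape the pattern first (a usage sketch, not part of the library):

```python
import re

uid = "Alice Example <alice@example.com>"
assert re.search(re.escape("alice@example.com"), uid)
# without re.escape, "alice@examplexcom" would also match the raw pattern
```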
java | protected Object getUnwrapped(Object name) {
try {
return DeepUnwrap.unwrap(get(name));
} catch (TemplateException e){
throw new ViewException(e);
}
} |
java | private static void mkdir(@NonNull final File directory, final boolean createParents)
throws IOException {
Condition.INSTANCE.ensureNotNull(directory, "The directory may not be null");
boolean result = createParents ? directory.mkdirs() : directory.mkdir();
if (!result && !directory.exists()) {
throw new IOException("Failed to create directory \"" + directory + "\"");
}
} |
python | def getalgo(self, operation, name):
'''Return the algorithm for *operation* named *name*'''
if operation not in self._algorithms:
raise NotAvailable('{0} not registered.'.format(operation))
oper = self._algorithms[operation]
try:
return oper[name]
except KeyError:
raise NotAvailable('{0} algorithm {1} not registered.'
.format(operation, name)) |
java | private static String getValueFromDB(final String _key,
final String _language)
{
String ret = null;
try {
boolean closeContext = false;
if (!Context.isThreadActive()) {
Context.begin();
closeContext = true;
}
final Connection con = Context.getConnection();
final PreparedStatement stmt = con.prepareStatement(DBProperties.SQLSELECT);
stmt.setString(1, _key);
stmt.setString(2, _language);
final ResultSet resultset = stmt.executeQuery();
if (resultset.next()) {
final String defaultValue = resultset.getString(1);
final String value = resultset.getString(2);
if (value != null) {
ret = value.trim();
} else if (defaultValue != null) {
ret = defaultValue.trim();
}
} else {
final PreparedStatement stmt2 = con.prepareStatement(DBProperties.SQLSELECTDEF);
stmt2.setString(1, _key);
final ResultSet resultset2 = stmt2.executeQuery();
if (resultset2.next()) {
final String defaultValue = resultset2.getString(1);
if (defaultValue != null) {
ret = defaultValue.trim();
}
}
resultset2.close();
stmt2.close();
}
resultset.close();
stmt.close();
con.commit();
con.close();
if (closeContext) {
Context.rollback();
}
} catch (final EFapsException e) {
DBProperties.LOG.error("initialiseCache()", e);
} catch (final SQLException e) {
DBProperties.LOG.error("initialiseCache()", e);
}
return ret;
} |
java | @Override
public void broadcast(final FacesEvent event) throws AbortProcessingException {
super.broadcast(event);
FacesContext context = getFacesContext();
// OPEN QUESTION: should we consider a navigation to the same view as a
// no-op navigation?
// only proceed if the response has not been marked complete and
// navigation to another view has not occurred
if ((event instanceof ActionEvent) && !context.getResponseComplete() && (context.getViewRoot() == getViewRootOf(event))) {
ActionListener listener = context.getApplication().getActionListener();
if (listener != null) {
UIViewRoot viewRootBefore = context.getViewRoot();
InstrumentedFacesContext instrumentedContext = new InstrumentedFacesContext(context);
// defer the call to renderResponse() that happens in
// ActionListener#processAction(ActionEvent)
instrumentedContext.disableRenderResponseControl().set();
listener.processAction((ActionEvent) event);
instrumentedContext.restore();
// if the response is marked complete, the story is over
if (!context.getResponseComplete()) {
UIViewRoot viewRootAfter = context.getViewRoot();
// if the view id changed as a result of navigation, then execute
// the JSF lifecycle for the new view id
if (viewRootBefore != viewRootAfter) {
/*
* // execute the JSF lifecycle by dispatching a forward request // this approach is problematic because
* it throws a wrench in the event broadcasting try { context.getExternalContext
* ().dispatch(context.getApplication() .getViewHandler().getActionURL(context,
* viewRootAfter.getViewId()) .substring(context.getExternalContext
* ().getRequestContextPath().length())); // kill this lifecycle execution context.responseComplete(); }
* catch (IOException e) { throw new FacesException("Dispatch to viewId failed: " +
* viewRootAfter.getViewId(), e); }
*/
// manually execute the JSF lifecycle on the new view id
// certain tweaks have to be made to the FacesContext to allow
// us to reset the lifecycle
Lifecycle lifecycle = getLifecycle(context);
instrumentedContext = new InstrumentedFacesContext(context);
instrumentedContext.pushViewIntoRequestMap().clearViewRoot().clearPostback().set();
lifecycle.execute(instrumentedContext);
instrumentedContext.restore();
/*
* Another approach would be to register a phase listener in the decode() method for the phase in which
* the action is set to invoke. The phase listener would performs a servlet forward if a non-redirect
* navigation occurs after the phase.
*/
} else {
// apply the deferred call (relevant when immediate is true)
context.renderResponse();
}
}
}
}
} |
java | public void setSection(MaterialSection section) {
section.select();
syncSectionsState(section);
switch (section.getTarget()) {
case MaterialSection.TARGET_FRAGMENT:
// if the user clicks the section for the screen they are already on, just close the drawer
if(section == currentSection) {
if(!deviceSupportMultiPane())
layout.closeDrawer(drawer);
return;
}
changeToolbarColor(section);
setFragment((Fragment) section.getTargetFragment(), section.getTitle(), (Fragment) currentSection.getTargetFragment());
afterFragmentSetted((Fragment) section.getTargetFragment(),section.getTitle());
break;
case MaterialSection.TARGET_ACTIVITY:
this.startActivity(section.getTargetIntent());
if (!deviceSupportMultiPane())
layout.closeDrawer(drawer);
break;
case MaterialSection.TARGET_LISTENER:
// call the section listener
section.getTargetListener().onClick(section);
if (!deviceSupportMultiPane())
                layout.closeDrawer(drawer);
            break;
        default:
break;
}
        // if the target is an activity, the current section remains the previous one
if(section.getTarget() != MaterialSection.TARGET_ACTIVITY ) {
syncSectionsState(section);
}
} |
java | public RowSet mapToResultType(ControlBeanContext context, Method m, ResultSet resultSet, Calendar cal) {
final SQL methodSQL = (SQL) context.getMethodPropertySet(m, SQL.class);
final int maxrows = methodSQL.maxRows();
try {
CachedRowSetImpl rows = new CachedRowSetImpl();
if (maxrows > 0) {
rows.setMaxRows(maxrows);
}
rows.populate(resultSet);
return rows;
} catch (SQLException e) {
throw new ControlException(e.getMessage(), e);
}
} |
python | def set_window_iconify_callback(window, cbfun):
"""
Sets the iconify callback for the specified window.
Wrapper for:
GLFWwindowiconifyfun glfwSetWindowIconifyCallback(GLFWwindow* window, GLFWwindowiconifyfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_iconify_callback_repository:
previous_callback = _window_iconify_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWwindowiconifyfun(cbfun)
_window_iconify_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetWindowIconifyCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0] |
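
A minimal usage sketch for the wrapper above, assuming the pyGLFW-style module these wrappers live in is importable as `glfw` and also exposes `init` and `create_window` (names assumed from the usual pyGLFW surface):

import glfw

def on_iconify(window, iconified):
    # iconified is nonzero when the window was minimized, zero when restored
    print('iconified' if iconified else 'restored')

glfw.init()
window = glfw.create_window(640, 480, "demo", None, None)
# returns the previously registered Python callback, if any
previous = glfw.set_window_iconify_callback(window, on_iconify)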
java | public static void init(ProcessingEnvironment env) {
processingEnv = env;
typeUtils = processingEnv.getTypeUtils();
elementUtils = processingEnv.getElementUtils();
} |
java | public static RedisConnectionException create(Throwable cause) {
if (cause instanceof RedisConnectionException) {
return new RedisConnectionException(cause.getMessage(), cause.getCause());
}
return new RedisConnectionException("Unable to connect", cause);
} |
java | @Override
public DescribeChangeSetResult describeChangeSet(DescribeChangeSetRequest request) {
request = beforeClientExecution(request);
return executeDescribeChangeSet(request);
} |
python | def each(coro, iterable, limit=0, loop=None,
collect=False, timeout=None, return_exceptions=False, *args, **kw):
"""
Concurrently iterates values yielded from an iterable, passing them to
an asynchronous coroutine.
    You can optionally collect yielded values by passing ``collect=True``,
    which makes this equivalent to ``paco.map()``.
Mapped values will be returned as an ordered list.
Items order is preserved based on origin iterable order.
Concurrency level can be configurable via `limit` param.
All coroutines will be executed in the same loop.
This function is a coroutine.
This function can be composed in a pipeline chain with ``|`` operator.
Arguments:
        coro (coroutinefunction): coroutine function to apply to each
            iterated value.
        iterable (iterable|asynchronousiterable): an iterable collection
            yielding values to pass to the coroutine.
limit (int): max iteration concurrency limit. Use ``0`` for no limit.
collect (bool): return yielded values from coroutines. Default False.
loop (asyncio.BaseEventLoop): optional event loop to use.
return_exceptions (bool): enable/disable returning exceptions in case
of error. `collect` param must be True.
timeout (int|float): timeout can be used to control the maximum number
of seconds to wait before returning. timeout can be an int or
float. If timeout is not specified or None, there is no limit to
the wait time.
*args (mixed): optional variadic arguments to pass to the
coroutine iterable function.
Returns:
results (list): ordered list of values yielded by coroutines
Raises:
TypeError: in case of invalid input arguments.
Usage::
async def mul_2(num):
return num * 2
await paco.each(mul_2, [1, 2, 3, 4, 5])
# => None
await paco.each(mul_2, [1, 2, 3, 4, 5], collect=True)
# => [2, 4, 6, 8, 10]
"""
assert_corofunction(coro=coro)
assert_iter(iterable=iterable)
# By default do not collect yielded values from coroutines
results = None
if collect:
# Store ordered results
results = [None] * len(iterable)
# Create concurrent executor
pool = ConcurrentExecutor(limit=limit, loop=loop)
@asyncio.coroutine
def collector(index, item):
result = yield from safe_run(coro(item, *args, **kw),
return_exceptions=return_exceptions)
if collect:
results[index] = result
return result
# Iterate and pass elements to coroutine
for index, value in enumerate(iterable):
pool.add(collector(index, value))
# Wait until all the coroutines finishes
yield from pool.run(return_exceptions=return_exceptions,
ignore_empty=True,
timeout=timeout)
# Returns list of mapped results in order
return results |
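
Beyond the docstring's usage block, a short sketch combining the concurrency limit with exception collection (assumes `paco` is installed):

import asyncio
import paco

async def double(num):
    if num == 3:
        raise ValueError('boom')
    return num * 2

async def main():
    # at most 2 coroutines run concurrently; errors come back in-place
    results = await paco.each(double, [1, 2, 3, 4], limit=2,
                              collect=True, return_exceptions=True)
    print(results)  # => [2, 4, ValueError('boom'), 8]

asyncio.get_event_loop().run_until_complete(main())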
python | def list_csi_node(self, **kwargs):
"""
list or watch objects of kind CSINode
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_csi_node(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1CSINodeList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_csi_node_with_http_info(**kwargs)
else:
(data) = self.list_csi_node_with_http_info(**kwargs)
return data |
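
A hedged usage sketch, assuming the official `kubernetes` Python client, where this generated method lives on the storage API object (the exact class name varies across client versions):

from kubernetes import client, config

config.load_kube_config()
api = client.StorageV1beta1Api()
# synchronous call; pass async_req=True to get a thread handle instead
node_list = api.list_csi_node(limit=50, timeout_seconds=10)
for item in node_list.items:
    print(item.metadata.name)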
python | def get_activity_modification(self):
"""Extract INDRA ActiveForm statements from the BioPAX model.
This method extracts ActiveForm Statements that are due to
protein modifications. This method reuses the structure of
BioPAX Pattern's
        org.biopax.paxtools.pattern.PatternBox.controlsStateChange pattern
with additional constraints to specify the gain or loss of a
modification occurring (phosphorylation, deubiquitination, etc.)
and the gain or loss of activity due to the modification state
change.
"""
mod_filter = 'residue modification, active'
for is_active in [True, False]:
p = self._construct_modification_pattern()
rel = mcct.GAIN if is_active else mcct.LOSS
p.add(mcc(rel, mod_filter),
"input simple PE", "output simple PE")
s = _bpp('Searcher')
res = s.searchPlain(self.model, p)
res_array = [_match_to_array(m) for m in res.toArray()]
for r in res_array:
reaction = r[p.indexOf('Conversion')]
activity = 'activity'
input_spe = r[p.indexOf('input simple PE')]
output_spe = r[p.indexOf('output simple PE')]
# Get the modifications
mod_in = \
BiopaxProcessor._get_entity_mods(input_spe)
mod_out = \
BiopaxProcessor._get_entity_mods(output_spe)
mod_shared = _get_mod_intersection(mod_in, mod_out)
gained_mods = _get_mod_difference(mod_out, mod_in)
# Here we get the evidence for the BiochemicalReaction
ev = self._get_evidence(reaction)
agents = self._get_agents_from_entity(output_spe)
for agent in _listify(agents):
static_mods = _get_mod_difference(agent.mods,
gained_mods)
# NOTE: with the ActiveForm representation we cannot
# separate static_mods and gained_mods. We assume here
# that the static_mods are inconsequential and therefore
# are not mentioned as an Agent condition, following
# don't care don't write semantics. Therefore only the
# gained_mods are listed in the ActiveForm as Agent
# conditions.
if gained_mods:
agent.mods = gained_mods
stmt = ActiveForm(agent, activity, is_active,
evidence=ev)
self.statements.append(decode_obj(stmt,
encoding='utf-8')) |
python | def ipblur(text): # brutalizer ;-)
""" blurs IP address """
import re
m = re.match(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.)\d{1,3}.*', text)
if not m:
return text
return '%sxxx' % m.group(1) |
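
A quick illustration of the masking behaviour; only the final octet (and anything after it) is replaced, and non-matching input passes through unchanged:

>>> ipblur('192.168.0.42')
'192.168.0.xxx'
>>> ipblur('not an ip')
'not an ip'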
java | private Thread fullGetFirstQueuedThread() {
/*
* The first node is normally head.next. Try to get its
* thread field, ensuring consistent reads: If thread
* field is nulled out or s.prev is no longer head, then
* some other thread(s) concurrently performed setHead in
* between some of our reads. We try this twice before
* resorting to traversal.
*/
Node h, s;
Thread st;
if (((h = head) != null && (s = h.next) != null &&
s.prev == head && (st = s.thread) != null) ||
((h = head) != null && (s = h.next) != null &&
s.prev == head && (st = s.thread) != null))
return st;
/*
* Head's next field might not have been set yet, or may have
* been unset after setHead. So we must check to see if tail
* is actually first node. If not, we continue on, safely
* traversing from tail back to head to find first,
* guaranteeing termination.
*/
Thread firstThread = null;
for (Node p = tail; p != null && p != head; p = p.prev) {
Thread t = p.thread;
if (t != null)
firstThread = t;
}
return firstThread;
} |
python | def import_dashboards(path, recursive):
"""Import dashboards from JSON"""
p = Path(path)
files = []
if p.is_file():
files.append(p)
elif p.exists() and not recursive:
files.extend(p.glob('*.json'))
elif p.exists() and recursive:
files.extend(p.rglob('*.json'))
for f in files:
logging.info('Importing dashboard from file %s', f)
try:
with f.open() as data_stream:
dashboard_import_export.import_dashboards(
db.session, data_stream)
except Exception as e:
logging.error('Error when importing dashboard from file %s', f)
logging.error(e) |
python | async def _sync_revoc_for_proof(self, rr_id: str) -> None:
"""
Pick up tails file reader handle for input revocation registry identifier. If no symbolic
link is present, get the revocation registry definition to retrieve its tails file hash,
then find the tails file and link it.
Raise AbsentTails for missing corresponding tails file.
:param rr_id: revocation registry identifier
"""
LOGGER.debug('HolderProver._sync_revoc_for_proof >>> rr_id: %s', rr_id)
if not ok_rev_reg_id(rr_id):
LOGGER.debug('HolderProver._sync_revoc_for_proof <!< Bad rev reg id %s', rr_id)
raise BadIdentifier('Bad rev reg id {}'.format(rr_id))
(cd_id, tag) = rev_reg_id2cred_def_id_tag(rr_id)
try:
json.loads(await self.get_cred_def(cd_id))
except AbsentCredDef:
LOGGER.debug(
'HolderProver._sync_revoc_for_proof <!< corrupt tails tree %s may be for another ledger',
self._dir_tails)
raise AbsentCredDef('Corrupt tails tree {} may be for another ledger'.format(self._dir_tails))
except ClosedPool:
pass # carry on, may be OK from cache only
with REVO_CACHE.lock:
revo_cache_entry = REVO_CACHE.get(rr_id, None)
tails = revo_cache_entry.tails if revo_cache_entry else None
if tails is None: # it's not yet set in cache
try:
tails = await Tails(self._dir_tails, cd_id, tag).open()
except AbsentTails: # get hash from ledger and check for tails file
rr_def = json.loads(await self.get_rev_reg_def(rr_id))
tails_hash = rr_def['value']['tailsHash']
path_tails = join(Tails.dir(self._dir_tails, rr_id), tails_hash)
if not isfile(path_tails):
LOGGER.debug('HolderProver._sync_revoc_for_proof <!< No tails file present at %s', path_tails)
raise AbsentTails('No tails file present at {}'.format(path_tails))
Tails.associate(self._dir_tails, rr_id, tails_hash)
tails = await Tails(self._dir_tails, cd_id, tag).open() # OK now since tails file present
if revo_cache_entry is None:
REVO_CACHE[rr_id] = RevoCacheEntry(None, tails)
else:
REVO_CACHE[rr_id].tails = tails
LOGGER.debug('HolderProver._sync_revoc_for_proof <<<') |
python | def start(self):
"""
        Given the pipeline topology, starts ``Piper`` instances in input ->
        output order. See ``Piper.start``. ``Piper`` instances are started in
        two stages, which allows them to share ``NuMaps``.
"""
# top - > bottom of pipeline
pipers = self.postorder()
#
for piper in pipers:
piper.start(stages=(0, 1))
for piper in pipers:
piper.start(stages=(2,)) |
java | public void suspend() {
if (suspension.compareAndSet(null, new CountDownLatch(1))) {
try {
suspendBarrier.await();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (BrokenBarrierException e) {
LOG.error("Exception during suspend: " + flowletContext, e);
}
}
} |
python | def default_reverse_key_func(full_key):
"""
Reverse of Django's default_key_func, i.e. undoing:
def default_key_func(key, key_prefix, version):
return '%s:%s:%s' % (key_prefix, version, key)
"""
match = reverse_key_re.match(full_key)
return match.group(3), match.group(1), int(match.group(2)) |
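
The module-level `reverse_key_re` is not shown above; a plausible definition and a round trip against Django's `default_key_func` could look like this (the regex is an assumption based on the `prefix:version:key` layout):

import re

# assumed pattern: non-greedy prefix, numeric version, then the key
reverse_key_re = re.compile(r'^(.*?):(\d+):(.*)$')

def default_key_func(key, key_prefix, version):
    return '%s:%s:%s' % (key_prefix, version, key)

full = default_key_func('user:42', 'myapp', 1)   # 'myapp:1:user:42'
key, prefix, version = default_reverse_key_func(full)
assert (key, prefix, version) == ('user:42', 'myapp', 1)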
java | public static int littleInt(InputStream input) throws IOException {
char lo = littleChar(input);
char hi = littleChar(input);
return (hi << 16) | lo;
} |
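
Assuming `littleChar` reads two bytes in little-endian order, the method decodes a little-endian 32-bit value overall; a quick Python check of the byte order:

import struct

data = bytes([0x78, 0x56, 0x34, 0x12])   # on-disk little-endian order
lo = data[0] | (data[1] << 8)            # first littleChar  -> 0x5678
hi = data[2] | (data[3] << 8)            # second littleChar -> 0x1234
assert (hi << 16) | lo == 0x12345678
assert struct.unpack('<I', data)[0] == 0x12345678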
java | @Override
public String getTypeDefinition(String typeName) throws AtlasException {
final IDataType dataType = typeSystem.getDataType(IDataType.class, typeName);
return TypesSerialization.toJson(typeSystem, dataType.getName());
} |
python | async def insert(query):
"""Perform INSERT query asynchronously. Returns last insert ID.
This function is called by object.create for single objects only.
"""
assert isinstance(query, peewee.Insert),\
("Error, trying to run insert coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
try:
if query._returning:
row = await cursor.fetchone()
result = row[0]
else:
database = _query_db(query)
last_id = await database.last_insert_id_async(cursor)
result = last_id
finally:
await cursor.release()
return result |
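
A hedged usage sketch in the style of peewee-async; the model and its database binding are hypothetical and omitted for brevity:

import peewee

class User(peewee.Model):
    username = peewee.CharField()
    # Meta.database binding omitted; required before actual execution

async def create_user():
    # Model.insert() builds a peewee.Insert query, which the coroutine
    # above executes and resolves to the last insert ID
    return await insert(User.insert(username='alice'))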
java | private void readFooter(RandomAccessInputStream raf, int location) throws IOException
{
raf.seek(location);
byte[] foot = new byte[FOOT_SIZE];
if (raf.read(foot) != FOOT_SIZE)
{
throw new IOException("Error encountered reading id3v2 footer");
}
majorVersion = (int) foot[3];
if (majorVersion <= NEW_MAJOR_VERSION)
{
minorVersion = (int) foot[4];
unsynchronisation = (foot[5]&0x80)!=0;
extended = (foot[5]&0x40)!=0;
experimental = (foot[5]&0x20)!=0;
footer = (foot[5]&0x10)!=0;
tagSize = Helpers.convertDWordToInt(foot, 6);
}
} |
python | def trace(self, context, obj):
"""Enumerate the children of the given object, as would be visible and utilized by dispatch."""
root = obj
if isroutine(obj):
yield Crumb(self, root, endpoint=True, handler=obj, options=opts(obj))
return
for name, attr in getmembers(obj if isclass(obj) else obj.__class__):
if name == '__getattr__':
sig = signature(attr)
path = '{' + list(sig.parameters.keys())[1] + '}'
reta = sig.return_annotation
if reta is not sig.empty:
if callable(reta) and not isclass(reta):
yield Crumb(self, root, path, endpoint=True, handler=reta, options=opts(reta))
else:
yield Crumb(self, root, path, handler=reta)
else:
yield Crumb(self, root, path, handler=attr)
del sig, path, reta
continue
elif name == '__call__':
yield Crumb(self, root, None, endpoint=True, handler=obj)
continue
if self.protect and name[0] == '_':
continue
yield Crumb(self, root, name,
endpoint=callable(attr) and not isclass(attr), handler=attr, options=opts(attr)) |
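
To make the `__getattr__` branch concrete, here is a hedged sketch of the controller shape this tracer walks (class names are illustrative):

class ArticleController:
    def __call__(self):          # traced as an endpoint via '__call__'
        return 'an article'

class RootController:
    def index(self):             # traced as a plain callable endpoint
        return 'home'

    def __getattr__(self, name) -> ArticleController:
        # trace() reads this return annotation: the dynamic segment
        # '{name}' descends into ArticleController instead of ending
        # dispatch, because the annotation is a class
        return ArticleController()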
python | def parse_arg_list(param_start):
"""Exctract values like --libdir=bla/bla/bla
param_start must be '--libdir='
"""
values = [arg[len(param_start):]
for arg in sys.argv
if arg.startswith(param_start)]
# remove recognized arguments from the sys.argv
otherArgs = [arg
for arg in sys.argv
if not arg.startswith(param_start)]
sys.argv = otherArgs
return values |
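
For instance, with a hypothetical command line:

import sys

sys.argv = ['setup.py', 'build', '--libdir=/usr/lib', '--libdir=/opt/lib']
values = parse_arg_list('--libdir=')
assert values == ['/usr/lib', '/opt/lib']
assert sys.argv == ['setup.py', 'build']   # recognized args were consumed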
python | def _coord_vertical_alignment(self):
    """Performs vertical alignment according to the current dirvh internal state.
    """
    dirh, dirv = self.dirh, self.dirv
    g = self.grx
    for l in self.layers[::-dirv]:
        if not l.prevlayer():
            continue
        r = None
        for vk in l[::dirh]:
            for m in l._medianindex(vk):
                # take the median node in the dirv layer:
                um = l.prevlayer()[m]
                # if vk is "free", align it with um's root
                if g[vk].align is vk:
                    if dirv == 1:
                        vpair = (vk, um)
                    else:
                        vpair = (um, vk)
                    # if the vk<->um link is usable for alignment
                    if (vpair not in self.conflicts) and \
                       (r is None or dirh*r < dirh*m):
                        g[um].align = vk
                        g[vk].root = g[um].root
                        g[vk].align = g[vk].root
                        r = m |
python | def halted(self):
"""Returns whether the CPU core was halted.
Args:
self (JLink): the ``JLink`` instance
Returns:
``True`` if the CPU core is halted, otherwise ``False``.
Raises:
JLinkException: on device errors.
"""
result = int(self._dll.JLINKARM_IsHalted())
if result < 0:
raise errors.JLinkException(result)
return (result > 0) |
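
A hedged usage sketch, assuming the pylink package this method appears to come from (the device name is hypothetical):

import pylink

jlink = pylink.JLink()
jlink.open()
jlink.connect('STM32F407VE')   # hypothetical target device
print('halted' if jlink.halted() else 'running')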
java | public TaskScheduler getTaskScheduler()
{
if (m_taskScheduler == null)
m_taskScheduler = new PrivateTaskScheduler(this.getBaseApplet().getApplication(), 0, true);
return m_taskScheduler;
} |
java | @Override
public void setFontName(String fontName) {
if (this.fontName.equals(fontName)) {
return;
} else {
this.fontName = fontName;
makeFonts();
}
} |
java | @Override
public Destination getDestination(PeerDescriptor peer, DestinationReferenceDescriptor destinationReference, Session session) throws JMSException
{
try
{
Context jndiContext = JNDITools.getContext(peer.getJdniInitialContextFactoryName(),
peer.getProviderURL(),
null);
return (Destination)jndiContext.lookup(destinationReference.getDestinationName());
}
catch (NamingException e)
{
throw new JMSException("Cannot resolve destination in JNDI : "+e.toString());
}
} |
java | private void addLocalRuntimeDefinition(
final AvroLocalAppSubmissionParameters localAppSubmissionParams,
final AvroJobSubmissionParameters jobSubmissionParameters,
final MultiRuntimeDefinitionBuilder builder) {
// create and serialize local configuration if defined
final Configuration localModule = LocalDriverConfiguration.CONF
.set(LocalDriverConfiguration.MAX_NUMBER_OF_EVALUATORS,
localAppSubmissionParams.getMaxNumberOfConcurrentEvaluators())
// ROOT FOLDER will point to the current runtime directory
.set(LocalDriverConfiguration.ROOT_FOLDER, ".")
.set(LocalDriverConfiguration.JVM_HEAP_SLACK, 0.0)
.set(LocalDriverConfiguration.CLIENT_REMOTE_IDENTIFIER, ClientRemoteIdentifier.NONE)
.set(LocalDriverConfiguration.JOB_IDENTIFIER,
jobSubmissionParameters.getJobId().toString())
.set(LocalDriverConfiguration.RUNTIME_NAMES,
org.apache.reef.runtime.local.driver.RuntimeIdentifier.RUNTIME_NAME)
.build();
// add local runtime to the builder
builder.addRuntime(localModule, org.apache.reef.runtime.local.driver.RuntimeIdentifier.RUNTIME_NAME);
} |
java | @Override
protected void doTickEffekts(ChannelMemory aktMemo)
{
if (aktMemo.effekt==0 && aktMemo.effektParam==0) return;
switch (aktMemo.effekt)
{
case 0x00 : // Arpeggio
aktMemo.arpegioIndex = (aktMemo.arpegioIndex+1)%3;
int nextNotePeriod = aktMemo.arpegioNote[aktMemo.arpegioIndex];
if (nextNotePeriod!=0)
{
aktMemo.currentNotePeriod = nextNotePeriod;
setNewPlayerTuningFor(aktMemo);
}
break;
case 0x01: // Porta Up
aktMemo.currentNotePeriod -= aktMemo.portaStepUp;
if (aktMemo.glissando) aktMemo.currentNotePeriod = Helpers.getRoundedPeriod(aktMemo.currentNotePeriod>>4)<<4;
if (aktMemo.currentNotePeriod<aktMemo.portaStepUpEnd) aktMemo.currentNotePeriod = aktMemo.portaStepUpEnd;
setNewPlayerTuningFor(aktMemo);
break;
case 0x02: // Porta Down
aktMemo.currentNotePeriod += aktMemo.portaStepDown;
if (aktMemo.glissando) aktMemo.currentNotePeriod = Helpers.getRoundedPeriod(aktMemo.currentNotePeriod>>4)<<4;
if (aktMemo.currentNotePeriod>aktMemo.portaStepDownEnd) aktMemo.currentNotePeriod = aktMemo.portaStepDownEnd;
setNewPlayerTuningFor(aktMemo);
break;
case 0x03 : // Porta to Note
doPortaToNoteEffekt(aktMemo);
break;
case 0x04 : // Vibrato
doVibratoEffekt(aktMemo);
break;
case 0x05 : // Porta to Note + VolumeSlide
doPortaToNoteEffekt(aktMemo);
doVolumeSlideEffekt(aktMemo);
break;
case 0x06: // Vibrato + VolumeSlide
doVibratoEffekt(aktMemo);
doVolumeSlideEffekt(aktMemo);
break;
case 0x07 : // Tremolo
doTremoloEffekt(aktMemo);
break;
case 0x0A : // VolumeSlide
doVolumeSlideEffekt(aktMemo);
break;
case 0x0E : // Extended
switch (aktMemo.effektParam>>4)
{
case 0x9 : // Retrig Note
aktMemo.retrigCount--;
if (aktMemo.retrigCount<=0)
{
aktMemo.retrigCount = aktMemo.retrigMemo;
resetInstrument(aktMemo);
}
break;
case 0xC : // Note Cut
if (aktMemo.noteCutCount>0)
{
aktMemo.noteCutCount--;
if (aktMemo.noteCutCount<=0)
{
aktMemo.noteCutCount=-1;
aktMemo.currentVolume = 0;
}
}
break;
case 0xD: // Note Delay
if (aktMemo.noteDelayCount>0)
{
aktMemo.noteDelayCount--;
if (aktMemo.noteDelayCount<=0)
{
aktMemo.noteDelayCount = -1;
setNewInstrumentAndPeriod(aktMemo);
}
}
break;
}
break;
case 0x11 : // Global volume slide
doGlobalVolumeSlideEffekt();
break;
case 0x14 : // Key off
if (aktMemo.keyOffCounter>0)
{
aktMemo.keyOffCounter--;
if (aktMemo.keyOffCounter<=0)
{
aktMemo.keyOffCounter = -1;
aktMemo.keyOff = true;
}
}
break;
case 0x19 : // Panning slide
doPanningSlideEffekt(aktMemo);
break;
case 0x1B: // Multi retrig note
if (aktMemo.retrigVolSlide>0)
{
switch (aktMemo.retrigVolSlide)
{
case 0x1: aktMemo.currentVolume--; break;
case 0x2: aktMemo.currentVolume-=2; break;
case 0x3: aktMemo.currentVolume-=4; break;
case 0x4: aktMemo.currentVolume-=8; break;
case 0x5: aktMemo.currentVolume-=16; break;
case 0x6: aktMemo.currentVolume=(aktMemo.currentVolume<<1)/3; break;
case 0x7: aktMemo.currentVolume>>=1; break;
case 0x8: break;
case 0x9: aktMemo.currentVolume++; break;
case 0xA: aktMemo.currentVolume+=2; break;
case 0xB: aktMemo.currentVolume+=4; break;
case 0xC: aktMemo.currentVolume+=8; break;
case 0xD: aktMemo.currentVolume+=16; break;
case 0xE: aktMemo.currentVolume=(aktMemo.currentVolume*3)>>1; break;
case 0xF: aktMemo.currentVolume<<=1; break;
}
aktMemo.currentSetVolume = aktMemo.currentVolume;
}
aktMemo.retrigCount--;
if (aktMemo.retrigCount<=0)
{
aktMemo.retrigCount = aktMemo.retrigMemo;
resetInstrument(aktMemo);
}
break;
case 0x1D : // Tremor
doTremorEffekt(aktMemo);
break;
}
} |
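
The multi-retrig handling in case 0x1B mirrors the classic FastTracker II volume-slide table; a small numeric sketch of the sixteen cases (clamping to the usual 0..64 range is an assumption, not shown in the snippet above):

def multi_retrig_volume(vol, x):
    # volume change per retrig; x is the volume-slide nibble
    table = [
        lambda v: v,             # 0x0: unused (guarded by the > 0 check)
        lambda v: v - 1,         # 0x1
        lambda v: v - 2,         # 0x2
        lambda v: v - 4,         # 0x3
        lambda v: v - 8,         # 0x4
        lambda v: v - 16,        # 0x5
        lambda v: (v * 2) // 3,  # 0x6: two thirds
        lambda v: v >> 1,        # 0x7: halve
        lambda v: v,             # 0x8: no change
        lambda v: v + 1,         # 0x9
        lambda v: v + 2,         # 0xA
        lambda v: v + 4,         # 0xB
        lambda v: v + 8,         # 0xC
        lambda v: v + 16,        # 0xD
        lambda v: (v * 3) >> 1,  # 0xE: one and a half
        lambda v: v << 1,        # 0xF: double
    ]
    return max(0, min(64, table[x](vol)))

assert multi_retrig_volume(32, 0xF) == 64   # doubled
assert multi_retrig_volume(32, 0x6) == 21   # (32*2)//3 == 21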