language | func_code_string |
---|---|
java | @Override
public DescribeDeploymentsResult describeDeployments(DescribeDeploymentsRequest request) {
request = beforeClientExecution(request);
return executeDescribeDeployments(request);
} |
python | def get_unspents(address, blockchain_client):
""" Get the spendable transaction outputs, also known as UTXOs or
unspent transaction outputs.
NOTE: this will only return unspents if the address provided is present
in the bitcoind server. Use the chain, blockchain, or blockcypher API
to grab the unspents for arbitrary addresses.
"""
if isinstance(blockchain_client, BitcoindClient):
bitcoind = blockchain_client.bitcoind
version_byte = blockchain_client.version_byte
elif isinstance(blockchain_client, AuthServiceProxy):
bitcoind = blockchain_client
version_byte = 0
else:
raise Exception('A BitcoindClient object is required')
addresses = []
addresses.append(str(address))
min_confirmations = 0
max_confirmation = 2000000000 # just a very large number for max
unspents = bitcoind.listunspent(min_confirmations, max_confirmation,
addresses)
return format_unspents(unspents) |
python | def get_queue_system_lock(self, queue):
"""
Get system lock timeout
Returns time system lock expires or None if lock does not exist
"""
key = self._key(LOCK_REDIS_KEY, queue)
return Semaphore.get_system_lock(self.connection, key) |
python | def init(images, num_channels, dim='2d', stride=2,
kernel_size=7, maxpool=True, training=True, scope='init'):
"""Standard ResNet initial block used as first RevNet block.
Args:
images: [N, H, W, 3] tensor of input images to the model.
num_channels: Output depth of convolutional layer in initial block.
dim: '2d' if 2-dimensional, '3d' if 3-dimensional.
stride: stride for the convolution and pool layer.
kernel_size: Size of the initial convolution filter
maxpool: If true, apply a maxpool after the convolution
training: True for train phase, False for eval phase.
scope: Optional scope for the init block.
Returns:
Two [N, H, W, C] output activations from input images.
"""
conv = CONFIG[dim]['conv']
pool = CONFIG[dim]['max_pool']
with tf.variable_scope(scope):
net = conv(images, num_channels, kernel_size, strides=stride,
padding='SAME', activation=None)
net = tf.layers.batch_normalization(net, training=training)
net = tf.nn.relu(net)
if maxpool:
net = pool(net, pool_size=3, strides=stride)
x1, x2 = tf.split(net, 2, axis=CONFIG[dim]['split_axis'])
return x1, x2 |
python | def _compute_video_hash(videofile):
""" compute videofile's hash
reference: https://docs.google.com/document/d/1w5MCBO61rKQ6hI5m9laJLWse__yTYdRugpVyz4RzrmM/preview
"""
seek_positions = [None] * 4
hash_result = []
with open(videofile, 'rb') as fp:
total_size = os.fstat(fp.fileno()).st_size
if total_size < 8192 + 4096:
raise exceptions.InvalidFileError(
'the video[{}] is too small'.format(os.path.basename(videofile)))
seek_positions[0] = 4096
seek_positions[1] = total_size // 3 * 2
seek_positions[2] = total_size // 3
seek_positions[3] = total_size - 8192
for pos in seek_positions:
fp.seek(pos, 0)
data = fp.read(4096)
m = hashlib.md5(data)
hash_result.append(m.hexdigest())
return ';'.join(hash_result) |
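The chunk-hashing scheme above is easy to see on an in-memory blob; the sketch below is illustrative only, mirroring the four fixed offsets (4096, two thirds, one third, and 8192 bytes from the end) on synthetic data rather than a real video file:

```python
import hashlib

# Illustrative only: hash four 4 KiB chunks of a synthetic blob and join them with ';',
# like the function above. A real call would read the chunks from the video file instead.
data = bytes(range(256)) * 256                     # 65536 bytes of sample data
total = len(data)
offsets = (4096, total // 3 * 2, total // 3, total - 8192)
digests = [hashlib.md5(data[pos:pos + 4096]).hexdigest() for pos in offsets]
print(';'.join(digests))
```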
python | def wrap_once(self, LayoutClass, *args, **kwargs):
"""
Wraps every layout object pointed in `self.slice` under a `LayoutClass` instance with
`args` and `kwargs` passed, unless layout object's parent is already a subclass of
`LayoutClass`.
"""
def wrap_object_once(layout_object, j):
if not isinstance(layout_object, LayoutClass):
layout_object.fields[j] = self.wrapped_object(
LayoutClass, layout_object.fields[j], *args, **kwargs
)
self.pre_map(wrap_object_once) |
python | def _check_location_part(cls, val, regexp):
"""Deprecated. See CourseLocator._check_location_part"""
cls._deprecation_warning()
return CourseLocator._check_location_part(val, regexp) |
python | def get_tracking_branch(self):
"""Returns (remote, branch) tuple, or None,None if there is no remote.
"""
try:
remote_uri = self.git("rev-parse", "--abbrev-ref",
"--symbolic-full-name", "@{u}")[0]
return remote_uri.split('/', 1)
except Exception as e:
# capitalization of message changed sometime between git 1.8.3
# and 2.12 - used to be "No upstream", now "no upstream"..
errmsg = str(e).lower()
if ("no upstream branch" not in errmsg
and "no upstream configured" not in errmsg):
raise e
return (None, None) |
java | public static String trimLines(String text) {
if (text == null) {
return null;
}
try {
StringBuffer output = new StringBuffer();
// Read the text line by line, trimming whitespace from each line:
BufferedReader in = new BufferedReader(new StringReader(text));
while (true) {
String line = in.readLine();
if (line == null) {
break;
}
output.append(line.trim());
output.append('\n');
}
return output.toString();
} catch (IOException e) {
throw new IllegalArgumentException(e);
}
} |
java | private void overrideConfigFromFile(Configuration config) throws IOException {
String configFile = GCS_CONFIG_OVERRIDE_FILE.get(config, config::get);
if (configFile != null) {
config.addResource(new FileInputStream(configFile));
}
} |
java | protected void importRelations() {
if (m_importedRelations.isEmpty()) {
return;
}
m_report.println(Messages.get().container(Messages.RPT_START_IMPORT_RELATIONS_0), I_CmsReport.FORMAT_HEADLINE);
int i = 0;
Iterator<Entry<String, List<CmsRelation>>> it = m_importedRelations.entrySet().iterator();
while (it.hasNext()) {
Entry<String, List<CmsRelation>> entry = it.next();
String resourcePath = entry.getKey();
List<CmsRelation> relations = entry.getValue();
m_report.print(
org.opencms.report.Messages.get().container(
org.opencms.report.Messages.RPT_SUCCESSION_2,
String.valueOf(i + 1),
String.valueOf(m_importedRelations.size())),
I_CmsReport.FORMAT_NOTE);
m_report.print(
Messages.get().container(
Messages.RPT_IMPORTING_RELATIONS_FOR_2,
resourcePath,
new Integer(relations.size())),
I_CmsReport.FORMAT_NOTE);
m_report.print(org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_DOTS_0));
boolean withErrors = false;
Iterator<CmsRelation> itRelations = relations.iterator();
while (itRelations.hasNext()) {
CmsRelation relation = itRelations.next();
try {
// Add the relation to the resource
m_cms.importRelation(
m_cms.getSitePath(relation.getSource(m_cms, CmsResourceFilter.ALL)),
m_cms.getSitePath(relation.getTarget(m_cms, CmsResourceFilter.ALL)),
relation.getType().getName());
} catch (CmsException e) {
m_report.addWarning(e);
withErrors = true;
if (LOG.isWarnEnabled()) {
LOG.warn(e.getLocalizedMessage());
}
if (LOG.isDebugEnabled()) {
LOG.debug(e.getLocalizedMessage(), e);
}
}
}
if (!withErrors) {
m_report.println(
org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_OK_0),
I_CmsReport.FORMAT_OK);
} else {
m_report.println(
org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_FAILED_0),
I_CmsReport.FORMAT_ERROR);
}
i++;
}
m_report.println(Messages.get().container(Messages.RPT_END_IMPORT_RELATIONS_0), I_CmsReport.FORMAT_HEADLINE);
} |
java | public static CaseTerminal resultOf(JcValue value) {
CaseExpression cx = new CaseExpression();
CaseTerminal ret = CaseFactory.createCaseTerminal(cx);
cx.setClauseType(ClauseType.CASE);
cx.setCaseValue(value);
return ret;
} |
python | def get_exon_ranges_for_transcript(self, transcript_id):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "application/json"}
self.attempt = 0
ext = "/overlap/id/{}?feature=exon".format(transcript_id)
r = self.ensembl_request(ext, headers)
exon_ranges = []
for exon in json.loads(r):
if exon["Parent"] != transcript_id:
continue
start = exon["start"]
end = exon["end"]
exon_ranges.append((start, end))
return exon_ranges |
python | def find_or_upsert(self, constructor, props, *, comp=None, return_status=False):
"""This finds or upserts a model with an auto primary key, and is a bit more flexible than
find_or_create.
First it looks for the model matching either comp, or props if comp is None.
If not found, it will try to upsert the model, doing nothing. If the returned model is new,
meaning its primary key is not set, then the upsert was unable to create the model, meaning
there was a conflict. If there is a conflict, find model is run again, and this time it
will succeed*. Otherwise, the constructed model is returned.
*this is not entirely true. It's possible that the upsert returns with None, meaning that a
record was created between the first find and the upsert, and then deleted between the upsert
and the second find. This situation is out of the scope of this method. A possible solution
would be to repeat the find/upsert cycle until a model can be returned, but I'm going to avoid
that for simplicity for now.
:param constructor: the model constructor
:param props: the properties to construct the model with if not found
:param comp: the properties to search for the model with. If None, props is used
:param return_status: if True, a 2-tuple of (model, status) is returned, where status is what
occurred with the model. Either 'found', 'created' or 'duplicate'.
"""
model = self.find_model(constructor, comp or props)
status = _UPSERT_STATUS_FOUND
if model is None:
model = constructor(**props)
status = _UPSERT_STATUS_CREATED
self.insert_model(model, upsert=Upsert(Upsert.DO_NOTHING))
if model.is_new:
model = self.find_model(constructor, comp or props)
status = _UPSERT_STATUS_DUPLICATE
if return_status:
return (model, status)
else:
return model |
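A self-contained toy re-creation of the control flow documented above, using a plain dict as the table and `dict.setdefault` to stand in for the DO NOTHING upsert; all names here are illustrative, not part of the original session API:

```python
# Illustrative only: mimic find -> upsert(do nothing) -> re-find with an in-memory dict.
_FOUND, _CREATED, _DUPLICATE = 'found', 'created', 'duplicate'
table = {}

def find_or_upsert(key, props):
    model = table.get(key)                      # first find
    status = _FOUND
    if model is None:
        model = table.setdefault(key, props)    # "upsert ... do nothing"
        status = _CREATED if model is props else _DUPLICATE   # conflict -> someone else won
    return model, status

print(find_or_upsert('a@example.com', {'name': 'Ada'}))   # ({'name': 'Ada'}, 'created')
print(find_or_upsert('a@example.com', {'name': 'Bob'}))   # ({'name': 'Ada'}, 'found')
```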
python | def element_screen_center(self, element):
"""
:returns: The center point of the element.
:rtype: class:`dict` with the field "left" set to the X
coordinate and the field "top" set to the Y
coordinate.
"""
pos = self.element_screen_position(element)
size = element.size
pos["top"] += int(size["height"] / 2)
pos["left"] += int(size["width"] / 2)
return pos |
java | private String translatePathExternal(String intpath) {
if (fullPath) {
return intpath;
} else {
return PathUtil.appendPath(rootPath.getPath(), intpath);
}
} |
java | @Override
public synchronized boolean offer(Runnable runnable) {
int allWorkingThreads = this.executor.getActiveCount() + super.size();
return allWorkingThreads < this.executor.getPoolSize() && super.offer(runnable);
} |
python | def add_dependencies(self, module):
"""
Adds a module and its dependencies to the list of dependencies.
Top-level entry point for adding a module and its dependencies.
"""
if module in self._processed_modules:
return None
if hasattr(module, "__name__"):
mn = module.__name__
else:
mn = '<unknown>'
_debug.debug("add_dependencies:module=%s", module)
# If the module in which the class/function is defined is __main__, don't add it. Just add its dependencies.
if mn == "__main__":
self._processed_modules.add(module)
# add the module as a dependency
elif not self._add_dependency(module, mn):
_debug.debug("add_dependencies:not added:module=%s", mn)
return None
_debug.debug("add_dependencies:ADDED:module=%s", mn)
# recursively get the module's imports and add those as dependencies
for dm in self._find_dependent_modules(module):
_debug.debug("add_dependencies:adding dependent module %s for %s", dm, mn)
self.add_dependencies(dm) |
java | public PropertyOwner retrieveUserProperties(String strRegistrationKey)
{
if ((strRegistrationKey == null) || (strRegistrationKey.length() == 0))
return this; // Use default user properties
UserProperties regKey = (UserProperties)m_htRegistration.get(strRegistrationKey);
if (regKey == null)
regKey = new UserProperties(this, strRegistrationKey);
regKey.bumpUseCount(+1);
return regKey;
} |
java | public static <T> T fillBeanWithMap(Map<?, ?> map, T bean, CopyOptions copyOptions) {
return fillBeanWithMap(map, bean, false, copyOptions);
} |
java | protected static <O> void updateMatrices(int size, MatrixParadigm mat, DBIDArrayMIter prots, PointerHierarchyRepresentationBuilder builder, Int2ObjectOpenHashMap<ModifiableDBIDs> clusters, DistanceQuery<O> dq, int c) {
final DBIDArrayIter ix = mat.ix, iy = mat.iy;
// c is the new cluster.
// Update entries (at (x,y) with x > y) in the matrix where x = c or y = c
// Update entries at (c,y) with y < c
ix.seek(c);
for(iy.seek(0); iy.getOffset() < c; iy.advance()) {
// Skip entry if already merged
if(builder.isLinked(iy)) {
continue;
}
updateEntry(mat, prots, clusters, dq, c, iy.getOffset());
}
// Update entries at (x,c) with x > c
iy.seek(c);
for(ix.seek(c + 1); ix.valid(); ix.advance()) {
// Skip entry if already merged
if(builder.isLinked(ix)) {
continue;
}
updateEntry(mat, prots, clusters, dq, ix.getOffset(), c);
}
} |
java | @SuppressWarnings("unchecked")
public List<CoreMap> getAnnotatedChunks(List<CoreLabel> tokens, int totalTokensOffset,
Class textKey, Class labelKey,
Class tokenChunkKey, Class tokenLabelKey)
{
List<CoreMap> chunks = new ArrayList();
LabelTagType prevTagType = null;
int tokenBegin = -1;
for (int i = 0; i < tokens.size(); i++) {
CoreLabel token = tokens.get(i);
String label = (String) token.get(labelKey);
LabelTagType curTagType = getTagType(label);
if (isEndOfChunk(prevTagType, curTagType)) {
int tokenEnd = i;
CoreMap chunk = ChunkAnnotationUtils.getAnnotatedChunk(tokens, tokenBegin, tokenEnd, totalTokensOffset,
tokenChunkKey, textKey, tokenLabelKey);
chunk.set(labelKey, prevTagType.type);
chunks.add(chunk);
tokenBegin = -1;
}
if (isStartOfChunk(prevTagType, curTagType)) {
if (tokenBegin >= 0) {
throw new RuntimeException("New chunk started, prev chunk not ended yet!");
}
tokenBegin = i;
}
prevTagType = curTagType;
}
if (tokenBegin >= 0) {
CoreMap chunk = ChunkAnnotationUtils.getAnnotatedChunk(tokens, tokenBegin, tokens.size(), totalTokensOffset,
tokenChunkKey, textKey, tokenLabelKey);
chunk.set(labelKey, prevTagType.type);
chunks.add(chunk);
}
// System.out.println("number of chunks " + chunks.size());
return chunks;
} |
python | def _segment_index(self, recarr, existing_index, start, new_segments):
"""
Generate index of datetime64 -> item offset.
Parameters:
-----------
recarr: new data being written (or appended)
existing_index: index field from the versions document of the previous version
start: first (0-based) offset of the new data
new_segments: list of offsets. Each offset is the row index of the
last row of a particular chunk relative to the start of the _original_ item.
array(new_data) - segments = array(offsets in item)
Returns:
--------
Binary(compress(array([(index, datetime)])))
Where index is the 0-based index of the datetime in the DataFrame
"""
# find the index of the first datetime64 column
idx_col = self._datetime64_index(recarr)
# if one exists let's create the index on it
if idx_col is not None:
new_segments = np.array(new_segments, dtype='i8')
last_rows = recarr[new_segments - start]
# create numpy index
index = np.core.records.fromarrays([last_rows[idx_col]] + [new_segments, ], dtype=INDEX_DTYPE)
# append to existing index if exists
if existing_index:
# existing_index_arr is read-only but it's never written to
existing_index_arr = np.frombuffer(decompress(existing_index), dtype=INDEX_DTYPE)
if start > 0:
existing_index_arr = existing_index_arr[existing_index_arr['index'] < start]
index = np.concatenate((existing_index_arr, index))
return Binary(compress(index.tostring()))
elif existing_index:
raise ArcticException("Could not find datetime64 index in item but existing data contains one")
return None |
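A minimal sketch of the index layout the method builds: a record array of (datetime, row offset) pairs taken at each segment's last row, which is what gets compressed into the Binary blob. The INDEX_DTYPE used here is an assumption that mirrors how the snippet accesses the 'index' field:

```python
import numpy as np

# Assumed dtype; the real INDEX_DTYPE is defined elsewhere in the original module.
INDEX_DTYPE = [('datetime', 'datetime64[ns]'), ('index', 'i8')]

dates = np.arange('2021-01-01', '2021-01-11', dtype='datetime64[D]').astype('datetime64[ns]')
new_segments = np.array([4, 9], dtype='i8')           # last-row offsets of two chunks
index = np.core.records.fromarrays([dates[new_segments], new_segments], dtype=INDEX_DTYPE)
print(index['datetime'], index['index'])               # the pairs that would be compressed
```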
java | public Result compile(String[] argv, Context context) {
if (stdOut != null) {
context.put(Log.outKey, stdOut);
}
if (stdErr != null) {
context.put(Log.errKey, stdErr);
}
log = Log.instance(context);
if (argv.length == 0) {
OptionHelper h = new OptionHelper.GrumpyHelper(log) {
@Override
public String getOwnName() { return ownName; }
@Override
public void put(String name, String value) { }
};
try {
Option.HELP.process(h, "-help");
} catch (Option.InvalidValueException ignore) {
}
return Result.CMDERR;
}
// prefix argv with contents of environment variable and expand @-files
try {
argv = CommandLine.parse(ENV_OPT_NAME, argv);
} catch (UnmatchedQuote ex) {
error("err.unmatched.quote", ex.variableName);
return Result.CMDERR;
} catch (FileNotFoundException | NoSuchFileException e) {
warning("err.file.not.found", e.getMessage());
return Result.SYSERR;
} catch (IOException ex) {
log.printLines(PrefixKind.JAVAC, "msg.io");
ex.printStackTrace(log.getWriter(WriterKind.NOTICE));
return Result.SYSERR;
}
Arguments args = Arguments.instance(context);
args.init(ownName, argv);
if (log.nerrors > 0)
return Result.CMDERR;
Options options = Options.instance(context);
// init Log
boolean forceStdOut = options.isSet("stdout");
if (forceStdOut) {
log.flush();
log.setWriters(new PrintWriter(System.out, true));
}
// init CacheFSInfo
// allow System property in following line as a Mustang legacy
boolean batchMode = (options.isUnset("nonBatchMode")
&& System.getProperty("nonBatchMode") == null);
if (batchMode)
CacheFSInfo.preRegister(context);
boolean ok = true;
// init file manager
fileManager = context.get(JavaFileManager.class);
if (fileManager instanceof BaseFileManager) {
((BaseFileManager) fileManager).setContext(context); // reinit with options
ok &= ((BaseFileManager) fileManager).handleOptions(args.getDeferredFileManagerOptions());
}
// handle this here so it works even if no other options given
String showClass = options.get("showClass");
if (showClass != null) {
if (showClass.equals("showClass")) // no value given for option
showClass = "com.sun.tools.javac.Main";
showClass(showClass);
}
ok &= args.validate();
if (!ok || log.nerrors > 0)
return Result.CMDERR;
if (args.isEmpty())
return Result.OK;
// init Dependencies
if (options.isSet("debug.completionDeps")) {
Dependencies.GraphDependencies.preRegister(context);
}
// init plugins
Set<List<String>> pluginOpts = args.getPluginOpts();
if (!pluginOpts.isEmpty() || context.get(PlatformDescription.class) != null) {
BasicJavacTask t = (BasicJavacTask) BasicJavacTask.instance(context);
t.initPlugins(pluginOpts);
}
// init multi-release jar handling
if (fileManager.isSupportedOption(Option.MULTIRELEASE.primaryName) == 1) {
Target target = Target.instance(context);
List<String> list = List.of(target.multiReleaseValue());
fileManager.handleOption(Option.MULTIRELEASE.primaryName, list.iterator());
}
// init JavaCompiler
JavaCompiler comp = JavaCompiler.instance(context);
// init doclint
List<String> docLintOpts = args.getDocLintOpts();
if (!docLintOpts.isEmpty()) {
BasicJavacTask t = (BasicJavacTask) BasicJavacTask.instance(context);
t.initDocLint(docLintOpts);
}
if (options.get(Option.XSTDOUT) != null) {
// Stdout reassigned - ask compiler to close it when it is done
comp.closeables = comp.closeables.prepend(log.getWriter(WriterKind.NOTICE));
}
try {
comp.compile(args.getFileObjects(), args.getClassNames(), null, List.nil());
if (log.expectDiagKeys != null) {
if (log.expectDiagKeys.isEmpty()) {
log.printRawLines("all expected diagnostics found");
return Result.OK;
} else {
log.printRawLines("expected diagnostic keys not found: " + log.expectDiagKeys);
return Result.ERROR;
}
}
return (comp.errorCount() == 0) ? Result.OK : Result.ERROR;
} catch (OutOfMemoryError | StackOverflowError ex) {
resourceMessage(ex);
return Result.SYSERR;
} catch (FatalError ex) {
feMessage(ex, options);
return Result.SYSERR;
} catch (AnnotationProcessingError ex) {
apMessage(ex);
return Result.SYSERR;
} catch (PropagatedException ex) {
// TODO: what about errors from plugins? should not simply rethrow the error here
throw ex.getCause();
} catch (Throwable ex) {
// Nasty. If we've already reported an error, compensate
// for buggy compiler error recovery by swallowing thrown
// exceptions.
if (comp == null || comp.errorCount() == 0 || options.isSet("dev"))
bugMessage(ex);
return Result.ABNORMAL;
} finally {
if (comp != null) {
try {
comp.close();
} catch (ClientCodeException ex) {
throw new RuntimeException(ex.getCause());
}
}
}
} |
java | @POST
@Path("/_search")
public Response searchWithPost()
throws IOException {
return execute(getResourceRequest(RequestTypeEnum.POST, RestOperationTypeEnum.SEARCH_TYPE));
} |
python | def weights_multi_problem(labels, taskid=-1):
"""Assign weight 1.0 to only the "targets" portion of the labels.
Weight 1.0 is assigned to all labels past the taskid.
Args:
labels: A Tensor of int32s.
taskid: an int32 representing the task id for a problem.
Returns:
A Tensor of floats.
Raises:
ValueError: The Task ID must be valid.
"""
taskid = check_nonnegative(taskid)
past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1)
# Additionally zero out the task id location
past_taskid *= to_float(tf.not_equal(labels, taskid))
non_taskid = to_float(labels)
return to_float(tf.not_equal(past_taskid * non_taskid, 0)) |
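The masking logic above is easier to see with plain numpy; a small worked example, assuming a [batch, length] label tensor where 0 is padding and 9 is the task id token:

```python
import numpy as np

labels = np.array([[5, 7, 9, 3, 4, 0, 0]])            # 9 = task id, 0 = padding
taskid = 9
past_taskid = np.cumsum(labels == taskid, axis=1)      # 1 from the task id token onwards
past_taskid = past_taskid * (labels != taskid)         # zero out the task id position itself
weights = ((past_taskid * labels) != 0).astype(np.float32)
print(weights)                                         # [[0. 0. 0. 1. 1. 0. 0.]]
```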
python | def load_manuf(filename):
"""Load manuf file from Wireshark.
param:
- filename: the file to load the manuf file from"""
manufdb = ManufDA(_name=filename)
with open(filename, "rb") as fdesc:
for line in fdesc:
try:
line = line.strip()
if not line or line.startswith(b"#"):
continue
parts = line.split(None, 2)
oui, shrt = parts[:2]
lng = parts[2].lstrip(b"#").strip() if len(parts) > 2 else ""
lng = lng or shrt
manufdb[oui] = plain_str(shrt), plain_str(lng)
except Exception:
log_loading.warning("Couldn't parse one line from [%s] [%r]",
filename, line, exc_info=True)
return manufdb |
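A self-contained illustration of the per-line parsing rule above (split on whitespace into at most three fields, take the long name from after a '#' and fall back to the short name); the sample line is representative of the Wireshark manuf format, not an excerpt from a real file:

```python
line = b"00:00:0C\tCisco\t# Cisco Systems, Inc"
parts = line.split(None, 2)
oui, shrt = parts[:2]
lng = parts[2].lstrip(b"#").strip() if len(parts) > 2 else b""
print(oui, shrt, lng or shrt)   # b'00:00:0C' b'Cisco' b'Cisco Systems, Inc'
```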
python | def get_monitored_hosting_devices_info(self, hd_state_filter=None):
"""
This function returns a list of all hosting devices monitored
by this agent
"""
wait_time = datetime.timedelta(
seconds=cfg.CONF.cfg_agent.hosting_device_dead_timeout)
resp = []
for hd_id in self.hosting_devices_backlog:
hd = self.hosting_devices_backlog[hd_id]['hd']
display_hd = True
if hd_state_filter is not None:
if hd['hd_state'] == hd_state_filter:
display_hd = True
else:
display_hd = False
if display_hd:
created_time = hd['created_at']
boottime = datetime.timedelta(seconds=hd['booting_time'])
backlogged_at = hd['backlog_insertion_ts']
booted_at = created_time + boottime
dead_at = backlogged_at + wait_time
resp.append({'host id': hd['id'],
'hd_state': hd['hd_state'],
'created at': str(created_time),
'backlogged at': str(backlogged_at),
'estimate booted at': str(booted_at),
'considered dead at': str(dead_at)})
else:
continue
return resp |
java | public void prepare(String sql, com.couchbase.lite.internal.database.sqlite.SQLiteStatementInfo outStatementInfo) {
if (sql == null) {
throw new IllegalArgumentException("sql must not be null.");
}
final int cookie = mRecentOperations.beginOperation("prepare", sql, null);
try {
final PreparedStatement statement = acquirePreparedStatement(sql);
try {
if (outStatementInfo != null) {
outStatementInfo.numParameters = statement.mNumParameters;
outStatementInfo.readOnly = statement.mReadOnly;
final int columnCount = nativeGetColumnCount(
mConnectionPtr, statement.mStatementPtr);
if (columnCount == 0) {
outStatementInfo.columnNames = EMPTY_STRING_ARRAY;
} else {
outStatementInfo.columnNames = new String[columnCount];
for (int i = 0; i < columnCount; i++) {
outStatementInfo.columnNames[i] = nativeGetColumnName(
mConnectionPtr, statement.mStatementPtr, i);
}
}
}
} finally {
releasePreparedStatement(statement);
}
} catch (RuntimeException ex) {
mRecentOperations.failOperation(cookie, ex);
throw ex;
} finally {
mRecentOperations.endOperation(cookie);
}
} |
java | public Collection<String> nearestLabels(@NonNull Collection<VocabWord> document, int topN) {
if (document.isEmpty())
throw new ND4JIllegalStateException("Impossible to get nearestLabels for empty list of words");
INDArray vector = inferVector(new ArrayList<VocabWord>(document));
return nearestLabels(vector, topN);
} |
java | public static Vector<Object> toXmlRpcRunnersParameters(Collection<Runner> runners)
{
Vector<Object> runnersParams = new Vector<Object>();
for(Runner runner : runners)
{
runnersParams.add(runner.marshallize());
}
return runnersParams;
} |
python | def disconnect(self, close=True):
"""
Logs off the session
:param close: Will close all tree connects in a session
"""
if not self._connected:
# already disconnected so let's return
return
if close:
for open in list(self.open_table.values()):
open.close(False)
for tree in list(self.tree_connect_table.values()):
tree.disconnect()
log.info("Session: %s - Logging off of SMB Session" % self.username)
logoff = SMB2Logoff()
log.info("Session: %s - Sending Logoff message" % self.username)
log.debug(str(logoff))
request = self.connection.send(logoff, sid=self.session_id)
log.info("Session: %s - Receiving Logoff response" % self.username)
res = self.connection.receive(request)
res_logoff = SMB2Logoff()
res_logoff.unpack(res['data'].get_value())
log.debug(str(res_logoff))
self._connected = False
del self.connection.session_table[self.session_id] |
python | def look_at(self, x, y, z):
"""Converges the two cameras to look at the specific point"""
for camera in self.cameras:
camera.look_at(x, y, z) |
java | static <S, E> GraphTraversal<S, E> union(Iterable<GraphTraversal<? super S, ? extends E>> traversals) {
return union(__.identity(), traversals);
} |
python | def docopt(doc, argv=None, help=True, version=None, options_first=False):
"""Parse `argv` based on command-line interface described in `doc`.
`docopt` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv[1:] is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object
If passed, the object will be printed if --version is in
`argv`.
options_first : bool (default: False)
Set to True to require options precede positional arguments,
i.e. to forbid options and positional arguments intermix.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docopt import docopt
>>> doc = '''
... Usage:
... my_program tcp <host> <port> [--timeout=<seconds>]
... my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
... my_program (-h | --help | --version)
...
... Options:
... -h, --help Show this screen and exit.
... --baud=<n> Baudrate [default: 9600]
... '''
>>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docopt(doc, argv)
{'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* For video introduction see http://docopt.org
* Full documentation is available in README.rst as well as online
at https://github.com/docopt/docopt#readme
"""
argv = sys.argv[1:] if argv is None else argv
usage_sections = parse_section('usage:', doc)
if len(usage_sections) == 0:
raise DocoptLanguageError('"usage:" (case-insensitive) not found.')
if len(usage_sections) > 1:
raise DocoptLanguageError('More than one "usage:" (case-insensitive).')
DocoptExit.usage = usage_sections[0]
options = parse_defaults(doc)
pattern = parse_pattern(formal_usage(DocoptExit.usage), options)
# [default] syntax for argument is disabled
#for a in pattern.flat(Argument):
# same_name = [d for d in arguments if d.name == a.name]
# if same_name:
# a.value = same_name[0].value
argv = parse_argv(Tokens(argv), list(options), options_first)
pattern_options = set(pattern.flat(Option))
for options_shortcut in pattern.flat(OptionsShortcut):
doc_options = parse_defaults(doc)
options_shortcut.children = list(set(doc_options) - pattern_options)
#if any_options:
# options_shortcut.children += [Option(o.short, o.long, o.argcount)
# for o in argv if type(o) is Option]
extras(help, version, argv, doc)
matched, left, collected = pattern.fix().match(argv)
if matched and left == []: # better error message if left?
return Dict((a.name, a.value) for a in (pattern.flat() + collected))
raise DocoptExit() |
java | ObjectName findMatchingMBeanPattern(ObjectName pName) {
// Check all stored patterns for a match and return the pattern if one is found
for (ObjectName pattern : patterns) {
if (pattern.apply(pName)) {
return pattern;
}
}
return null;
} |
python | def _forward_mode(self, *args):
"""Forward mode differentiation for a constant"""
# Evaluate inner function self.f
X: np.ndarray
dX: np.ndarray
X, dX = self.f._forward_mode(*args)
# Alias the power to p for legibility
p: float = self.p
# The function value
val = X ** p
# The derivative
diff = p * X ** (p-1) * dX
return (val, diff) |
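A quick numeric sanity check of the rule applied above, d(x^p)/dx = p * x^(p-1), comparing the analytic derivative against a finite-difference estimate:

```python
import numpy as np

x, dx, p = np.array([2.0, 3.0]), np.array([1.0, 1.0]), 3.0
val = x ** p
diff = p * x ** (p - 1) * dx                      # analytic forward-mode derivative
eps = 1e-6
approx = ((x + eps * dx) ** p - x ** p) / eps     # finite-difference check
print(val, diff, approx)                          # diff ~ approx: [12. 27.] up to O(eps)
```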
java | @GwtIncompatible("InputStream")
static InputStream asInputStream(final ByteInput input) {
checkNotNull(input);
return new InputStream() {
@Override
public int read() throws IOException {
return input.read();
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
checkNotNull(b);
checkPositionIndexes(off, off + len, b.length);
if (len == 0) {
return 0;
}
int firstByte = read();
if (firstByte == -1) {
return -1;
}
b[off] = (byte) firstByte;
for (int dst = 1; dst < len; dst++) {
int readByte = read();
if (readByte == -1) {
return dst;
}
b[off + dst] = (byte) readByte;
}
return len;
}
@Override
public void close() throws IOException {
input.close();
}
};
} |
java | protected void updateSequence() throws RepositoryException
{
checkIfOpened();
try
{
ResultSet count = updateNextOrderNumber(localMaxOrderNumber);
try
{
if (!count.next())
{
throw new RepositoryException("Could not update the sequence: "
+ "the returned value cannot be found");
}
}
finally
{
try
{
count.close();
}
catch (SQLException e)
{
LOG.error("Can't close the ResultSet: " + e.getMessage());
}
}
}
catch (SQLException e)
{
throw new RepositoryException(e);
}
} |
python | def create_new_address_for_user(self, user_id):
"""Create a new bitcoin address to accept payments for a User.
This is a convenience wrapper around `get_child` that helps you do
the right thing. This method always creates a public, non-prime
address that can be generated from a BIP32 public key on an
insecure server."""
max_id = 0x80000000
if user_id < 0 or user_id > max_id:
raise ValueError(
"Invalid UserID. Must be between 0 and %s" % max_id)
return self.get_child(user_id, is_prime=False, as_private=False) |
java | public static boolean areConvertibleClasses (@Nonnull final Class <?> aSrcClass, @Nonnull final Class <?> aDstClass)
{
ValueEnforcer.notNull (aSrcClass, "SrcClass");
ValueEnforcer.notNull (aDstClass, "DstClass");
// Same class?
if (aDstClass.equals (aSrcClass))
return true;
// Default assignable
if (aDstClass.isAssignableFrom (aSrcClass))
return true;
// Special handling for "int.class" == "Integer.class" etc.
if (aDstClass == getPrimitiveWrapperClass (aSrcClass))
return true;
if (aDstClass == getPrimitiveClass (aSrcClass))
return true;
// Not convertible
return false;
} |
java | static Map<String, String> resolveHeaders(Method method) {
Map<String, String> headerMap = new HashMap<>();
for (Header header : method.getDeclaringClass().getAnnotationsByType(Header.class)) {
headerMap.put(header.name(), header.value());
}
for (Header header : method.getAnnotationsByType(Header.class)) {
headerMap.put(header.name(), header.value());
}
return headerMap;
} |
java | @Override
public <T> AppEngineUpdate<E> set(Property<?, T> property, T value) {
if (property == null) {
throw new IllegalArgumentException("'property' must not be [" + property + "]");
}
properties.put(property, value);
return this;
} |
python | def _normalize_tz(val):
"""Normalizes all valid ISO8601 time zone variants to the one python will
parse.
:val: a timestamp string without a timezone, or with a timezone in one of the ISO8601 accepted
formats.
"""
match = _TZ_RE.match(val)
if match:
ts, tz = match.groups()
if len(tz) == 5:
# If the length of the tz is 5 then it is of the form (+|-)dddd, which is exactly what python
# wants, so just return it.
return ts + tz
if len(tz) == 6:
# If the length of the tz is 6 then it is of the form (+|-)dd:dd, just remove the colon
return ts + tz[:3] + tz[4:]
if tz == "Z" or tz == "z":
# If the tz is "Z" or 'z', return a timezone of +0000
return ts + "+0000"
else:
# Otherwise, the timezone must be of the format (+|-)dd, in which case we just need to add two
# "0" to it, and it will be in the proper format.
return ts + tz + "00"
else:
return val |
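A self-contained sketch of the same normalization; the regex here is an assumption, since `_TZ_RE` itself is not shown in the snippet, and it presumes the value ends in a time rather than a bare date:

```python
import re

_TZ_RE = re.compile(r"^(.*?)([+-]\d{2}(?::?\d{2})?|[Zz])$")   # assumed pattern

def normalize_tz(val):
    match = _TZ_RE.match(val)
    if not match:
        return val
    ts, tz = match.groups()
    if len(tz) == 5:                  # (+|-)HHMM, already what %z accepts
        return ts + tz
    if len(tz) == 6:                  # (+|-)HH:MM, drop the colon
        return ts + tz[:3] + tz[4:]
    if tz in ("Z", "z"):              # Zulu time
        return ts + "+0000"
    return ts + tz + "00"             # (+|-)HH, pad the minutes

print(normalize_tz("2021-03-01T12:00:00+05:30"))   # 2021-03-01T12:00:00+0530
print(normalize_tz("2021-03-01T12:00:00Z"))        # 2021-03-01T12:00:00+0000
```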
java | public static byte[] toByteArray(Streamable streamable) {
try {
final ByteArrayOutputStream baos = new ByteArrayOutputStream(streamable.size());
final DataOutputStream dos = new DataOutputStream(baos);
streamable.write(dos);
dos.flush();
baos.close();
return baos.toByteArray();
} catch (IOException e) {
throw new RuntimeException(e);
}
} |
java | public void setDriverClassName(String driver)
{
try
{
this.driver = (Driver) Class.forName(driver).newInstance();
}
catch (Exception e)
{
throw new RuntimeException("Unable to load driver: " + driver, e);
}
} |
java | public KeePassFile openDatabase(String password, InputStream keyFileStream) {
if (password == null) {
throw new IllegalArgumentException(MSG_EMPTY_MASTER_KEY);
}
if (keyFileStream == null) {
throw new IllegalArgumentException("You must provide a non-empty KeePass keyfile stream.");
}
try {
byte[] passwordBytes = password.getBytes(UTF_8);
byte[] hashedPassword = Sha256.hash(passwordBytes);
byte[] protectedBuffer = new KeyFileReader().readKeyFile(keyFileStream);
return new KeePassDatabaseReader(keepassHeader).decryptAndParseDatabase(ByteUtils.concat(hashedPassword, protectedBuffer), keepassFile);
} catch (UnsupportedEncodingException e) {
throw new UnsupportedOperationException(MSG_UTF8_NOT_SUPPORTED, e);
}
} |
python | def _to_dict(self):
''' Returns a dictionary representation of this object '''
return dict(minimum=self.minimum._to_dict(),
maximum=self.maximum._to_dict()) |
python | def main():
"""Main function for SPEAD receiver module."""
# Check command line arguments.
if len(sys.argv) < 2:
raise RuntimeError('Usage: python3 async_recv.py <json config>')
# Set up logging.
sip_logging.init_logger(show_thread=True)
# Load SPEAD configuration from JSON file.
# with open(sys.argv[-1]) as f:
# spead_config = json.load(f)
spead_config = json.loads(sys.argv[1])
# Set up the SPEAD receiver and run it (see method, above).
receiver = SpeadReceiver(spead_config)
receiver.run() |
python | def portTryReduce(root: LNode, port: LPort):
"""
Check if the majority of children are connected to the same port;
if so, reduce the children and connect this port directly instead of the children.
:note: use reduceUselessAssignments, extractSplits, flattenTrees before this function
to maximize its effect
"""
if not port.children:
return
for p in port.children:
portTryReduce(root, p)
target_nodes = {}
ch_cnt = countDirectlyConnected(port, target_nodes)
if not target_nodes:
# disconnected port
return
new_target, children_edge_to_destroy = max(target_nodes.items(),
key=lambda x: len(x[1]))
cnt = len(children_edge_to_destroy)
if cnt < ch_cnt / 2 or cnt == 1 and ch_cnt == 2:
# too few shared connections to reduce
return
children_to_destroy = set()
on_target_children_to_destroy = set()
for child, edge in children_edge_to_destroy:
if child.direction == PortType.OUTPUT:
target_ch = edge.dsts
elif child.direction == PortType.INPUT:
target_ch = edge.srcs
else:
raise ValueError(child.direction)
if len(target_ch) != 1:
raise NotImplementedError("multiple connected nodes", target_ch)
target_ch = target_ch[0]
try:
assert target_ch.parent is new_target, (
target_ch,
target_ch.parent,
new_target)
except AssertionError:
print('Wrong target:\n', edge.src, "\n", edge.dst,
"\n", target_ch.parent, "\n", new_target)
raise
if child.direction == PortType.OUTPUT:
edge.removeTarget(target_ch)
elif child.direction == PortType.INPUT:
edge.removeTarget(child)
if not edge.srcs or not edge.dsts:
edge.remove()
if not target_ch.incomingEdges and not target_ch.outgoingEdges:
# disconnect selected children from this port and target
on_target_children_to_destroy.add(target_ch)
if not child.incomingEdges and not child.outgoingEdges:
children_to_destroy.add(child)
# destroy children of new target and this port if possible
port.children = [
ch for ch in port.children if ch not in children_to_destroy]
new_target.children = [
ch for ch in new_target.children if ch not in on_target_children_to_destroy]
# connect this port to new target as it was connected by children before
# [TODO] names for new edges
if port.direction == PortType.OUTPUT:
root.addEdge(port, new_target)
elif port.direction == PortType.INPUT:
root.addEdge(new_target, port)
else:
raise NotImplementedError(port.direction) |
java | final public int[] readInputRegisters(int serverAddress, int startAddress, int quantity) throws
ModbusProtocolException, ModbusNumberException, ModbusIOException {
ModbusRequest request = ModbusRequestBuilder.getInstance().buildReadInputRegisters(serverAddress, startAddress, quantity);
ReadHoldingRegistersResponse response = (ReadInputRegistersResponse) processRequest(request);
return response.getRegisters();
} |
python | def parse_localinstancepath(parser, event, node):
"""Parse LOCALINSTANCEPATH element returning instancename
<!ELEMENT LOCALINSTANCEPATH (LOCALNAMESPACEPATH, INSTANCENAME)>
"""
#pylint: disable=unused-argument
(next_event, next_node) = six.next(parser)
if not _is_start(next_event, next_node, 'LOCALNAMESPACEPATH'):
raise ParseError('Expecting LOCALNAMESPACEPATH')
namespacepath = parse_localnamespacepath(parser, next_event, next_node)
(next_event, next_node) = six.next(parser)
if not _is_start(next_event, next_node, 'INSTANCENAME'):
raise ParseError('Expecting INSTANCENAME')
instancename = parse_instancename(parser, next_event, next_node)
instancename.namespace = namespacepath
return instancename |
python | def asxc(cls, obj):
"""Convert object into Xcfunc."""
if isinstance(obj, cls): return obj
if is_string(obj): return cls.from_name(obj)
raise TypeError("Don't know how to convert <%s:%s> to Xcfunc" % (type(obj), str(obj))) |
java | public OpenPgpV4Fingerprint generateAndImportKeyPair(BareJid ourJid)
throws NoSuchAlgorithmException, InvalidAlgorithmParameterException, NoSuchProviderException,
PGPException, IOException {
throwIfNoProviderSet();
OpenPgpStore store = provider.getStore();
PGPKeyRing keys = store.generateKeyRing(ourJid);
try {
store.importSecretKey(ourJid, keys.getSecretKeys());
store.importPublicKey(ourJid, keys.getPublicKeys());
} catch (MissingUserIdOnKeyException e) {
// This should never throw, since we set our jid literally one line above this comment.
throw new AssertionError(e);
}
OpenPgpV4Fingerprint fingerprint = new OpenPgpV4Fingerprint(keys.getSecretKeys());
store.setTrust(ourJid, fingerprint, OpenPgpTrustStore.Trust.trusted);
return fingerprint;
} |
java | public static base_responses update(nitro_service client, forwardingsession resources[]) throws Exception {
base_responses result = null;
if (resources != null && resources.length > 0) {
forwardingsession updateresources[] = new forwardingsession[resources.length];
for (int i=0;i<resources.length;i++){
updateresources[i] = new forwardingsession();
updateresources[i].name = resources[i].name;
updateresources[i].connfailover = resources[i].connfailover;
}
result = update_bulk_request(client, updateresources);
}
return result;
} |
java | public TableFactor multiply(TableFactor other) {
// Calculate the result domain
List<Integer> domain = new ArrayList<>();
List<Integer> otherDomain = new ArrayList<>();
List<Integer> resultDomain = new ArrayList<>();
for (int n : neighborIndices) {
domain.add(n);
resultDomain.add(n);
}
for (int n : other.neighborIndices) {
otherDomain.add(n);
if (!resultDomain.contains(n)) resultDomain.add(n);
}
// Create result TableFactor
int[] resultNeighborIndices = new int[resultDomain.size()];
int[] resultDimensions = new int[resultNeighborIndices.length];
for (int i = 0; i < resultDomain.size(); i++) {
int var = resultDomain.get(i);
resultNeighborIndices[i] = var;
// assert consistency about variable size, we can't have the same variable with two different sizes
assert ((getVariableSize(var) == 0 && other.getVariableSize(var) > 0) ||
(getVariableSize(var) > 0 && other.getVariableSize(var) == 0) ||
(getVariableSize(var) == other.getVariableSize(var)));
resultDimensions[i] = Math.max(getVariableSize(resultDomain.get(i)), other.getVariableSize(resultDomain.get(i)));
}
TableFactor result = new TableFactor(resultNeighborIndices, resultDimensions);
// OPTIMIZATION:
// If we're a factor of size 2 receiving a message of size 1, then we can optimize that pretty heavily
// We could just use the general algorithm at the end of this set of special cases, but this is the fastest way
if (otherDomain.size() == 1 && (resultDomain.size() == domain.size()) && domain.size() == 2) {
int msgVar = otherDomain.get(0);
int msgIndex = resultDomain.indexOf(msgVar);
if (msgIndex == 0) {
for (int i = 0; i < resultDimensions[0]; i++) {
double d = other.values[i];
int k = i * resultDimensions[1];
for (int j = 0; j < resultDimensions[1]; j++) {
int index = k + j;
result.values[index] = values[index] + d;
assert !Double.isNaN(values[index]);
}
}
} else if (msgIndex == 1) {
for (int i = 0; i < resultDimensions[0]; i++) {
int k = i * resultDimensions[1];
for (int j = 0; j < resultDimensions[1]; j++) {
int index = k + j;
result.values[index] = values[index] + other.values[j];
assert !Double.isNaN(values[index]);
}
}
}
}
// OPTIMIZATION:
// The special case where we're a message of size 1, and the other factor is receiving the message, and of size 2
else if (domain.size() == 1 && (resultDomain.size() == otherDomain.size()) && resultDomain.size() == 2) {
return other.multiply(this);
}
// Otherwise we follow the big comprehensive, slow general purpose algorithm
else {
// Calculate back-pointers from the result domain indices to original indices
int[] mapping = new int[result.neighborIndices.length];
int[] otherMapping = new int[result.neighborIndices.length];
for (int i = 0; i < result.neighborIndices.length; i++) {
mapping[i] = domain.indexOf(result.neighborIndices[i]);
otherMapping[i] = otherDomain.indexOf(result.neighborIndices[i]);
}
// Do the actual joining operation between the two tables, applying 'join' for each result element.
int[] assignment = new int[neighborIndices.length];
int[] otherAssignment = new int[other.neighborIndices.length];
// OPTIMIZATION:
// Rather than use the standard iterator, which creates lots of int[] arrays on the heap, which need to be GC'd,
// we use the fast version that just mutates one array. Since this is read once for us here, this is ideal.
Iterator<int[]> fastPassByReferenceIterator = result.fastPassByReferenceIterator();
int[] resultAssignment = fastPassByReferenceIterator.next();
while (true) {
// Set the assignment arrays correctly
for (int i = 0; i < resultAssignment.length; i++) {
if (mapping[i] != -1) assignment[mapping[i]] = resultAssignment[i];
if (otherMapping[i] != -1) otherAssignment[otherMapping[i]] = resultAssignment[i];
}
result.setAssignmentLogValue(resultAssignment, getAssignmentLogValue(assignment) + other.getAssignmentLogValue(otherAssignment));
// This mutates the resultAssignment[] array, rather than creating a new one
if (fastPassByReferenceIterator.hasNext()) fastPassByReferenceIterator.next();
else break;
}
}
return result;
} |
java | public String getValue(Option option) {
if (option == null) {
return null;
}
String[] values = getValues(option);
return (values == null ? null : values[0]);
} |
python | def write(self, data):
"""
Writes data to the device.
:param data: data to write
:type data: string
:returns: number of bytes sent
:raises: :py:class:`~alarmdecoder.util.CommError`
"""
data_sent = None
try:
if isinstance(data, str):
data = data.encode('utf-8')
data_sent = self._device.send(data)
if data_sent == 0:
raise CommError('Error writing to device.')
self.on_write(data=data)
except (SSL.Error, socket.error) as err:
raise CommError('Error writing to device.', err)
return data_sent |
java | public int size() {
int size = 0;
for(Ta a: getKeys()) {
for(Tb b: get2ndValues(a)) {
size += get3rdValues(a, b).size();
}
}
return size;
} |
python | def HasIndex(self, index):
"""
Flag indicating the index exists in any of the spent coin items.
Args:
index (int): the output index to look for.
Returns:
bool: True if any spent coin item has the given index, False otherwise.
"""
for i in self.Items:
if i.index == index:
return True
return False |
java | @Deprecated
public static Pattern some(int min, int max, CharPredicate predicate) {
return times(min, max, predicate);
} |
python | def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None,
lock_class=None, thread_local=True):
"""
Return a new Lock object using key ``name`` that mimics
the behavior of threading.Lock.
If specified, ``timeout`` indicates a maximum life for the lock.
By default, it will remain locked until release() is called.
``sleep`` indicates the amount of time to sleep per loop iteration
when the lock is in blocking mode and another client is currently
holding the lock.
``blocking_timeout`` indicates the maximum amount of time in seconds to
spend trying to acquire the lock. A value of ``None`` indicates
continue trying forever. ``blocking_timeout`` can be specified as a
float or integer, both representing the number of seconds to wait.
``lock_class`` forces the specified lock implementation.
``thread_local`` indicates whether the lock token is placed in
thread-local storage. By default, the token is placed in thread local
storage so that a thread only sees its token, not a token set by
another thread. Consider the following timeline:
time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
thread-1 sets the token to "abc"
time: 1, thread-2 blocks trying to acquire `my-lock` using the
Lock instance.
time: 5, thread-1 has not yet completed. redis expires the lock
key.
time: 5, thread-2 acquired `my-lock` now that it's available.
thread-2 sets the token to "xyz"
time: 6, thread-1 finishes its work and calls release(). if the
token is *not* stored in thread local storage, then
thread-1 would see the token value as "xyz" and would be
able to successfully release the thread-2's lock.
In some use cases it's necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
local storage isn't disabled in this case, the worker thread won't see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren't common and as such default to using
thread local storage. """
if lock_class is None:
if self._use_lua_lock is None:
# the first time .lock() is called, determine if we can use
# Lua by attempting to register the necessary scripts
try:
LuaLock.register_scripts(self)
self._use_lua_lock = True
except ResponseError:
self._use_lua_lock = False
lock_class = self._use_lua_lock and LuaLock or Lock
return lock_class(self, name, timeout=timeout, sleep=sleep,
blocking_timeout=blocking_timeout,
thread_local=thread_local) |
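A hedged usage sketch of the API documented above (the snippet is from an older redis-py, but the call shape is the same in current releases); it assumes a Redis server reachable on localhost:

```python
import redis

r = redis.Redis()                      # assumes a local Redis server
# Auto-expire the lock after 10 seconds; give up acquiring after waiting 5 seconds.
with r.lock("my-lock", timeout=10, blocking_timeout=5):
    print("lock held; the token lives in thread-local storage by default")
```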
java | private boolean decode_Segmentlist(AnnouncementParmValue annPaVa) throws ParserException {
boolean decoded = false;
decoded = decode_SegmentDescriptor(annPaVa);
if (decoded) {
boolean f = true;
while (f && index < totalChars) {
if (chars[index] == ',') {
index++;
f = decode_SegmentDescriptor(annPaVa);
} else {
f = false;
}
}
}
return decoded;
} |
python | async def async_delete_api_key(session, host, port, api_key):
"""Delete API key from deCONZ."""
url = 'http://{host}:{port}/api/{api_key}/config/whitelist/{api_key}'.format(
host=host, port=str(port), api_key=api_key)
response = await async_request(session.delete, url)
_LOGGER.info(response) |
python | async def make_response(self, request, response, **response_kwargs):
"""Convert a handler result to web response."""
while iscoroutine(response):
response = await response
if isinstance(response, StreamResponse):
return response
response_kwargs.setdefault('content_type', 'application/json')
return Response(text=dumps(response), **response_kwargs) |
python | def collect_episodes(local_evaluator=None,
remote_evaluators=[],
timeout_seconds=180):
"""Gathers new episodes metrics tuples from the given evaluators."""
pending = [
a.apply.remote(lambda ev: ev.get_metrics()) for a in remote_evaluators
]
collected, _ = ray.wait(
pending, num_returns=len(pending), timeout=timeout_seconds * 1.0)
num_metric_batches_dropped = len(pending) - len(collected)
if pending and len(collected) == 0:
raise ValueError(
"Timed out waiting for metrics from workers. You can configure "
"this timeout with `collect_metrics_timeout`.")
metric_lists = ray_get_and_free(collected)
if local_evaluator:
metric_lists.append(local_evaluator.get_metrics())
episodes = []
for metrics in metric_lists:
episodes.extend(metrics)
return episodes, num_metric_batches_dropped |
java | protected DataSourceMetadataUpdateResult updateDataSourceMetadataWithHandle(
final Handle handle,
final String dataSource,
final DataSourceMetadata startMetadata,
final DataSourceMetadata endMetadata
) throws IOException
{
Preconditions.checkNotNull(dataSource, "dataSource");
Preconditions.checkNotNull(startMetadata, "startMetadata");
Preconditions.checkNotNull(endMetadata, "endMetadata");
final byte[] oldCommitMetadataBytesFromDb = getDataSourceMetadataWithHandleAsBytes(handle, dataSource);
final String oldCommitMetadataSha1FromDb;
final DataSourceMetadata oldCommitMetadataFromDb;
if (oldCommitMetadataBytesFromDb == null) {
oldCommitMetadataSha1FromDb = null;
oldCommitMetadataFromDb = null;
} else {
oldCommitMetadataSha1FromDb = BaseEncoding.base16().encode(
Hashing.sha1().hashBytes(oldCommitMetadataBytesFromDb).asBytes()
);
oldCommitMetadataFromDb = jsonMapper.readValue(oldCommitMetadataBytesFromDb, DataSourceMetadata.class);
}
final boolean startMetadataMatchesExisting;
if (oldCommitMetadataFromDb == null) {
startMetadataMatchesExisting = startMetadata.isValidStart();
} else {
// Checking against the last committed metadata.
// Converting the last one into start metadata for checking since only the same type of metadata can be matched.
// Even though kafka/kinesis indexing services use different sequenceNumber types for representing
// start and end sequenceNumbers, the below conversion is fine because the new start sequenceNumbers are supposed
// to be same with end sequenceNumbers of the last commit.
startMetadataMatchesExisting = startMetadata.asStartMetadata().matches(oldCommitMetadataFromDb.asStartMetadata());
}
if (!startMetadataMatchesExisting) {
// Not in the desired start state.
log.error(
"Not updating metadata, existing state[%s] in metadata store doesn't match to the new start state[%s].",
oldCommitMetadataFromDb,
startMetadata
);
return DataSourceMetadataUpdateResult.FAILURE;
}
// Only endOffsets should be stored in metadata store
final DataSourceMetadata newCommitMetadata = oldCommitMetadataFromDb == null
? endMetadata
: oldCommitMetadataFromDb.plus(endMetadata);
final byte[] newCommitMetadataBytes = jsonMapper.writeValueAsBytes(newCommitMetadata);
final String newCommitMetadataSha1 = BaseEncoding.base16().encode(
Hashing.sha1().hashBytes(newCommitMetadataBytes).asBytes()
);
final DataSourceMetadataUpdateResult retVal;
if (oldCommitMetadataBytesFromDb == null) {
// SELECT -> INSERT can fail due to races; callers must be prepared to retry.
final int numRows = handle.createStatement(
StringUtils.format(
"INSERT INTO %s (dataSource, created_date, commit_metadata_payload, commit_metadata_sha1) "
+ "VALUES (:dataSource, :created_date, :commit_metadata_payload, :commit_metadata_sha1)",
dbTables.getDataSourceTable()
)
)
.bind("dataSource", dataSource)
.bind("created_date", DateTimes.nowUtc().toString())
.bind("commit_metadata_payload", newCommitMetadataBytes)
.bind("commit_metadata_sha1", newCommitMetadataSha1)
.execute();
retVal = numRows == 1 ? DataSourceMetadataUpdateResult.SUCCESS : DataSourceMetadataUpdateResult.TRY_AGAIN;
} else {
// Expecting a particular old metadata; use the SHA1 in a compare-and-swap UPDATE
final int numRows = handle.createStatement(
StringUtils.format(
"UPDATE %s SET "
+ "commit_metadata_payload = :new_commit_metadata_payload, "
+ "commit_metadata_sha1 = :new_commit_metadata_sha1 "
+ "WHERE dataSource = :dataSource AND commit_metadata_sha1 = :old_commit_metadata_sha1",
dbTables.getDataSourceTable()
)
)
.bind("dataSource", dataSource)
.bind("old_commit_metadata_sha1", oldCommitMetadataSha1FromDb)
.bind("new_commit_metadata_payload", newCommitMetadataBytes)
.bind("new_commit_metadata_sha1", newCommitMetadataSha1)
.execute();
retVal = numRows == 1 ? DataSourceMetadataUpdateResult.SUCCESS : DataSourceMetadataUpdateResult.TRY_AGAIN;
}
if (retVal == DataSourceMetadataUpdateResult.SUCCESS) {
log.info("Updated metadata from[%s] to[%s].", oldCommitMetadataFromDb, newCommitMetadata);
} else {
log.info("Not updating metadata, compare-and-swap failure.");
}
return retVal;
} |
python | def process(self, raster):
""" Applies the morphological operation to the mask object
"""
dim = len(raster.shape)
if dim == 3:
for dim in range(raster.shape[2]):
raster[:, :, dim] = self.morph_operation(raster[:, :, dim], self.struct_elem)
elif dim == 4:
for time, dim in it.product(range(raster.shape[0]), range(raster.shape[3])):
raster[time, :, :, dim] = self.morph_operation(raster[time, :, :, dim], self.struct_elem)
else:
raise ValueError('Invalid number of dimensions: {}'.format(dim))
return raster |
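A self-contained sketch of the per-band application pattern above, with `scipy.ndimage.binary_dilation` standing in for the configurable `morph_operation` and a 3x3 structuring element; both are assumptions, since the real operation and element come from the task's configuration:

```python
import itertools as it
import numpy as np
from scipy.ndimage import binary_dilation

raster = np.random.rand(4, 32, 32, 3) > 0.5        # time x height x width x band boolean mask
struct_elem = np.ones((3, 3), dtype=bool)

# 4-D case: apply the 2-D morphological operation to every (time, band) slice independently.
for time, band in it.product(range(raster.shape[0]), range(raster.shape[3])):
    raster[time, :, :, band] = binary_dilation(raster[time, :, :, band], struct_elem)
print(raster.mean())                               # fraction of True pixels after dilation
```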
java | private static void doWork() {
derivedDoubleGauge.createTimeSeries(
labelValues,
blockingQueue,
new ToDoubleFunction<LinkedBlockingQueue>() {
@Override
public double applyAsDouble(LinkedBlockingQueue queue) {
return queue.size();
}
});
// Your code here.
} |
java | public void delete_property(String name, String[] propnames) throws DevFailed {
databaseDAO.delete_property(this, name, propnames);
} |
java | public void removeExtension() {
try {
if (extendedRelationsDao.isTableExists()) {
List<ExtendedRelation> extendedRelations = extendedRelationsDao
.queryForAll();
for (ExtendedRelation extendedRelation : extendedRelations) {
geoPackage.deleteTable(extendedRelation
.getMappingTableName());
}
geoPackage.dropTable(extendedRelationsDao.getTableName());
}
if (extensionsDao.isTableExists()) {
extensionsDao.deleteByExtension(EXTENSION_NAME);
}
} catch (SQLException e) {
throw new GeoPackageException(
"Failed to delete Related Tables extension and table. GeoPackage: "
+ geoPackage.getName(), e);
}
} |
python | def Y_less(self):
"""Decrease the scaling."""
self.parent.value('y_scale', self.parent.value('y_scale') / 2)
self.parent.traces.display() |
java | @Override
public CommerceOrderNote fetchByC_ERC(long companyId,
String externalReferenceCode) {
return fetchByC_ERC(companyId, externalReferenceCode, true);
} |
java | private static String readAll(@Nonnull final Reader reader) throws IOException {
final StringBuilder buffer = new StringBuilder();
int cp;
while ((cp = reader.read()) != -1) {
buffer.append((char) cp);
}
return buffer.toString();
} |
python | def get_project_by_network_id(network_id,**kwargs):
"""
get a project complexmodel by a network_id
"""
user_id = kwargs.get('user_id')
projects_i = db.DBSession.query(Project).join(ProjectOwner).join(Network, Project.id==Network.project_id).filter(
Network.id==network_id,
ProjectOwner.user_id==user_id).order_by('name').all()
ret_project = None
for project_i in projects_i:
try:
project_i.check_read_permission(user_id)
ret_project = project_i
except:
log.info("Can't return project %s. User %s does not have permission to read it.", project_i.id, user_id)
return ret_project |
java | public void call(Result result)
{
if (result.isException())
{
statistics.exception();
annotable.annotate( Annotations.exception( result.getException() ) );
return;
}
annotable.annotate( new Annotation()
{
public void writeDown(Text text)
{
StringBuilder content = new StringBuilder( text.getContent() );
Object[] arguments = message.arguments();
MatchResult matchResult = message.matchResult();
// Backwards since we are replacing text with real positions
for (int index = arguments.length - 1; index >= 0; index--)
{
int start = matchResult.start( index + 1 );
int end = matchResult.end( index + 1 );
if (arguments[index] instanceof Expectation)
{
Expectation expectation = (Expectation) arguments[index];
if (expectation.meets())
{
String span = String.format( "<span style='%s: %s;'>%s</span>", Styles.BACKGROUND_COLOR, Colors.GREEN, expectation.getExpected() );
content.replace( start, end, span );
statistics.right();
}
else
{
String span = String.format( "<span style='%s: %s;'>%s</span>", Styles.BACKGROUND_COLOR, Colors.RED, expectation.getDescribe() );
content.replace( start, end, span );
statistics.wrong();
}
}
}
text.setContent( content.toString() );
}
} );
} |
java | public boolean matches(String prefix, Object actual, Object expected,
JSONCompareResult result) throws ValueMatcherException {
if (comparator instanceof LocationAwareValueMatcher) {
return ((LocationAwareValueMatcher<Object>)comparator).equal(prefix, actual, expected, result);
}
return comparator.equal(actual, expected);
} |
java | @SuppressWarnings("checkstyle:methodlength")
public void registerConfigLocally(IdentifiedDataSerializable newConfig,
ConfigCheckMode configCheckMode) {
IdentifiedDataSerializable currentConfig = null;
if (newConfig instanceof MultiMapConfig) {
MultiMapConfig multiMapConfig = (MultiMapConfig) newConfig;
currentConfig = multiMapConfigs.putIfAbsent(multiMapConfig.getName(), multiMapConfig);
} else if (newConfig instanceof MapConfig) {
MapConfig newMapConfig = (MapConfig) newConfig;
currentConfig = mapConfigs.putIfAbsent(newMapConfig.getName(), newMapConfig);
if (currentConfig == null) {
listener.onConfigRegistered(newMapConfig);
}
} else if (newConfig instanceof CardinalityEstimatorConfig) {
CardinalityEstimatorConfig cardinalityEstimatorConfig = (CardinalityEstimatorConfig) newConfig;
currentConfig = cardinalityEstimatorConfigs.putIfAbsent(cardinalityEstimatorConfig.getName(),
cardinalityEstimatorConfig);
} else if (newConfig instanceof RingbufferConfig) {
RingbufferConfig ringbufferConfig = (RingbufferConfig) newConfig;
currentConfig = ringbufferConfigs.putIfAbsent(ringbufferConfig.getName(), ringbufferConfig);
} else if (newConfig instanceof LockConfig) {
LockConfig lockConfig = (LockConfig) newConfig;
currentConfig = lockConfigs.putIfAbsent(lockConfig.getName(), lockConfig);
} else if (newConfig instanceof AtomicLongConfig) {
AtomicLongConfig atomicLongConfig = (AtomicLongConfig) newConfig;
currentConfig = atomicLongConfigs.putIfAbsent(atomicLongConfig.getName(), atomicLongConfig);
} else if (newConfig instanceof AtomicReferenceConfig) {
AtomicReferenceConfig atomicReferenceConfig = (AtomicReferenceConfig) newConfig;
currentConfig = atomicReferenceConfigs.putIfAbsent(atomicReferenceConfig.getName(), atomicReferenceConfig);
} else if (newConfig instanceof CountDownLatchConfig) {
CountDownLatchConfig countDownLatchConfig = (CountDownLatchConfig) newConfig;
currentConfig = countDownLatchConfigs.putIfAbsent(countDownLatchConfig.getName(), countDownLatchConfig);
} else if (newConfig instanceof ListConfig) {
ListConfig listConfig = (ListConfig) newConfig;
currentConfig = listConfigs.putIfAbsent(listConfig.getName(), listConfig);
} else if (newConfig instanceof SetConfig) {
SetConfig setConfig = (SetConfig) newConfig;
currentConfig = setConfigs.putIfAbsent(setConfig.getName(), setConfig);
} else if (newConfig instanceof ReplicatedMapConfig) {
ReplicatedMapConfig replicatedMapConfig = (ReplicatedMapConfig) newConfig;
currentConfig = replicatedMapConfigs.putIfAbsent(replicatedMapConfig.getName(), replicatedMapConfig);
} else if (newConfig instanceof TopicConfig) {
TopicConfig topicConfig = (TopicConfig) newConfig;
currentConfig = topicConfigs.putIfAbsent(topicConfig.getName(), topicConfig);
} else if (newConfig instanceof ExecutorConfig) {
ExecutorConfig executorConfig = (ExecutorConfig) newConfig;
currentConfig = executorConfigs.putIfAbsent(executorConfig.getName(), executorConfig);
} else if (newConfig instanceof DurableExecutorConfig) {
DurableExecutorConfig durableExecutorConfig = (DurableExecutorConfig) newConfig;
currentConfig = durableExecutorConfigs.putIfAbsent(durableExecutorConfig.getName(), durableExecutorConfig);
} else if (newConfig instanceof ScheduledExecutorConfig) {
ScheduledExecutorConfig scheduledExecutorConfig = (ScheduledExecutorConfig) newConfig;
currentConfig = scheduledExecutorConfigs.putIfAbsent(scheduledExecutorConfig.getName(), scheduledExecutorConfig);
} else if (newConfig instanceof QueueConfig) {
QueueConfig queueConfig = (QueueConfig) newConfig;
currentConfig = queueConfigs.putIfAbsent(queueConfig.getName(), queueConfig);
} else if (newConfig instanceof ReliableTopicConfig) {
ReliableTopicConfig reliableTopicConfig = (ReliableTopicConfig) newConfig;
currentConfig = reliableTopicConfigs.putIfAbsent(reliableTopicConfig.getName(), reliableTopicConfig);
} else if (newConfig instanceof CacheSimpleConfig) {
CacheSimpleConfig cacheSimpleConfig = (CacheSimpleConfig) newConfig;
currentConfig = cacheSimpleConfigs.putIfAbsent(cacheSimpleConfig.getName(), cacheSimpleConfig);
if (currentConfig == null) {
listener.onConfigRegistered(cacheSimpleConfig);
}
} else if (newConfig instanceof EventJournalConfig) {
EventJournalConfig eventJournalConfig = (EventJournalConfig) newConfig;
registerEventJournalConfig(eventJournalConfig, configCheckMode);
} else if (newConfig instanceof MerkleTreeConfig) {
MerkleTreeConfig config = (MerkleTreeConfig) newConfig;
currentConfig = mapMerkleTreeConfigs.putIfAbsent(config.getMapName(), config);
} else if (newConfig instanceof SemaphoreConfig) {
SemaphoreConfig semaphoreConfig = (SemaphoreConfig) newConfig;
currentConfig = semaphoreConfigs.putIfAbsent(semaphoreConfig.getName(), semaphoreConfig);
} else if (newConfig instanceof FlakeIdGeneratorConfig) {
FlakeIdGeneratorConfig config = (FlakeIdGeneratorConfig) newConfig;
currentConfig = flakeIdGeneratorConfigs.putIfAbsent(config.getName(), config);
} else if (newConfig instanceof PNCounterConfig) {
PNCounterConfig config = (PNCounterConfig) newConfig;
currentConfig = pnCounterConfigs.putIfAbsent(config.getName(), config);
} else {
throw new UnsupportedOperationException("Unsupported config type: " + newConfig);
}
checkCurrentConfigNullOrEqual(configCheckMode, currentConfig, newConfig);
} |
java | public static <T> boolean anyValuesInCommon(Collection<T> c1, Collection<T> c2)
{
// Let's always iterate over smaller collection:
if (c1.size() > c2.size()) {
Collection<T> tmp = c1;
c1 = c2;
c2 = tmp;
}
Iterator<T> it = c1.iterator();
while (it.hasNext()) {
if (c2.contains(it.next())) {
return true;
}
}
return false;
} |
python | def validate_signature(filename):
""" Remove invalid signatures from a binary file
If the file signature is missing or valid then it will be ignored
Invalid signatures are replaced with an ad-hoc signature. This is the
closest you can get to removing a signature on MacOS
Parameters
----------
filename : str
Filepath to a binary file
"""
out, err = back_tick(['codesign', '--verify', filename],
ret_err=True, as_str=True, raise_err=False)
if not err:
return # The existing signature is valid
if 'code object is not signed at all' in err:
return # File has no signature, and adding a new one isn't necessary
# This file's signature is invalid and needs to be replaced
replace_signature(filename, '-') |
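A short usage sketch, assuming the function above is importable and the codesign tool is on PATH; the glob pattern is an assumption for illustration.

# Hypothetical usage: re-sign every shared library bundled with a package.
import glob

for dylib in glob.glob('package/.dylibs/*.dylib'):   # illustrative path
    validate_signature(dylib)  # replaces an invalid signature with an ad-hoc one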
python | def list(path, filename=None, start=None, stop=None, recursive=False, directories=False):
"""
List files specified by path.
The path may include a single wildcard ('*') in the filename specifier.
Returns a sorted list of absolute path strings.
"""
path = uri_to_path(path)
if not filename and recursive:
return listrecursive(path)
if filename:
if os.path.isdir(path):
path = os.path.join(path, filename)
else:
path = os.path.join(os.path.dirname(path), filename)
else:
if os.path.isdir(path) and not directories:
path = os.path.join(path, "*")
files = glob.glob(path)
if not directories:
files = [fpath for fpath in files if not os.path.isdir(fpath)]
files.sort()
files = select(files, start, stop)
return files |
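A usage sketch for the listing helper above; the directory and patterns are assumptions for illustration (note that the helper's name shadows the built-in list).

# Hypothetical usage of the list() helper above.
bin_files = list('/data/images', filename='*.bin')      # wildcard match, files only
first_ten = list('/data/images', start=0, stop=10)      # slice of the sorted listing
everything = list('/data/images', recursive=True)       # walk subdirectories too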
java | @Override
public RandomVariable getValue(double evaluationTime, LIBORModelMonteCarloSimulationModel model) throws CalculationException {
/*
* Calculate value of the swap at exercise date on each path (beware of perfect foresight - all rates are observed at simulationTime=exerciseDate)
*/
RandomVariable valueFixLeg = new RandomVariableFromDoubleArray(fixingDates[fixingDates.length-1], 0.0);
RandomVariable valueFloatLeg = new RandomVariableFromDoubleArray(paymentDates[paymentDates.length-1], -1.0);
// Calculate the value of the swap by working backward through all periods
for(int period=fixingDates.length-1; period>=0; period--)
{
double fixingDate = fixingDates[period];
double paymentDate = paymentDates[period];
double periodLength = periodLengths != null ? periodLengths[period] : paymentDate - fixingDate;
// Get random variables - note that this is the rate at simulation time = exerciseDate
RandomVariable libor = model.getLIBOR(exerciseDate, fixingDate, paymentDate);
// Add payment received at end of period
RandomVariable payoff = new RandomVariableFromDoubleArray(paymentDate, 1.0 * periodLength);
valueFixLeg = valueFixLeg.add(payoff);
// Discount back to beginning of period
valueFloatLeg = valueFloatLeg.discount(libor, periodLength);
valueFixLeg = valueFixLeg.discount(libor, periodLength);
}
valueFloatLeg = valueFloatLeg.add(1.0);
RandomVariable parSwapRate = valueFloatLeg.div(valueFixLeg);
RandomVariable payoffUnit = new RandomVariableFromDoubleArray(paymentDates[0], periodLengths[0]);
payoffUnit = payoffUnit.discount(model.getLIBOR(exerciseDate, fixingDates[0], paymentDates[0]),paymentDates[0]-fixingDates[0]);
RandomVariable value = parSwapRate.sub(strike).floor(0.0).mult(payoffUnit);
// If the exercise date is not the first period's start date, then discount back to the exercise date (calculate the forward starting swap)
if(fixingDates[0] != exerciseDate) {
RandomVariable libor = model.getLIBOR(exerciseDate, exerciseDate, fixingDates[0]);
double periodLength = fixingDates[0] - exerciseDate;
// Discount back to beginning of period
value = value.discount(libor, periodLength);
}
/*
* Calculate value
*/
RandomVariable numeraire = model.getNumeraire(exerciseDate);
RandomVariable monteCarloProbabilities = model.getMonteCarloWeights(model.getTimeIndex(exerciseDate));
value = value.div(numeraire).mult(monteCarloProbabilities);
RandomVariable numeraireAtZero = model.getNumeraire(evaluationTime);
RandomVariable monteCarloProbabilitiesAtZero = model.getMonteCarloWeights(evaluationTime);
value = value.mult(numeraireAtZero).div(monteCarloProbabilitiesAtZero);
return value;
} |
java | @Override
public boolean delete(Path path, boolean recursive) throws IOException {
LOG.debug("delete({}, {})", path, recursive);
if (mStatistics != null) {
mStatistics.incrementWriteOps(1);
}
AlluxioURI uri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
DeletePOptions options = DeletePOptions.newBuilder().setRecursive(recursive).build();
try {
mFileSystem.delete(uri, options);
return true;
} catch (InvalidPathException | FileDoesNotExistException e) {
LOG.warn("delete failed: {}", e.getMessage());
return false;
} catch (AlluxioException e) {
throw new IOException(e);
}
} |
python | def run(migrations_path=None, url=None, port=None):
"""Run migrations"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if url:
url = str(url).rstrip('/')
options.url_registry_db = url
if port:
options.db_port = int(port)
if migrations_path:
migrations = migrate.collect(migrations_path)
else:
migrations = migrate.collect()
func = partial(migrate.run_migrations, migrations)
IOLoop.instance().run_sync(func) |
python | def setCurrentIndex(self, currentIndex):
""" Sets the current item to be the item at currentIndex.
Also selects the row, so as to give consistent user feedback.
See also the notes at the top of this module on current item vs selected item(s).
"""
selectionModel = self.selectionModel()
selectionFlags = (QtCore.QItemSelectionModel.ClearAndSelect |
QtCore.QItemSelectionModel.Rows)
selectionModel.setCurrentIndex(currentIndex, selectionFlags) |
java | private static <T> void notifyPostInvoke(EJBRequestCollaborator<T> collaborator, EJBRequestData request, Object preInvokeData) throws CSIException {
try {
@SuppressWarnings("unchecked")
T uncheckedCookie = (T) preInvokeData;
collaborator.postInvoke(request, uncheckedCookie);
} catch (RuntimeException ex) {
throw ex;
} catch (CSIException ex) {
throw ex;
} catch (Exception ex) {
throw new CSIException("", ex);
}
} |
java | public int getWidth() {
if (format == ImageFormat.GIF && streamMetadata != null) {
Node screenDescNode = NodeUtils.getChild(streamMetadata, "LogicalScreenDescriptor");
if (screenDescNode != null) {
return NodeUtils.getIntAttr(screenDescNode, "logicalScreenWidth");
}
}
return getWidth(0);
} |
java | public int getKeyAsInt(int index) throws IOException {
if (index < 0 || index >= structure.keySizes.size()) {
throw new IOException("Index " + index + " is out of range.");
}
return Bytes.toInt(key, structure.keyByteOffsets.get(index));
} |
java | public void restoreRepository(File repositoryBackupSetDir, boolean asynchronous) throws BackupOperationException,
BackupConfigurationException
{
File[] cfs = PrivilegedFileHelper.listFiles(repositoryBackupSetDir, new RepositoryBackupLogsFilter());
if (cfs.length == 0)
{
throw new BackupConfigurationException("Can not found repository backup log in directory : "
+ repositoryBackupSetDir.getPath());
}
if (cfs.length > 1)
{
throw new BackupConfigurationException(
"Backup set directory should contains only one repository backup log : " + repositoryBackupSetDir.getPath());
}
RepositoryBackupChainLog backupChainLog = new RepositoryBackupChainLog(cfs[0]);
try
{
this.restore(backupChainLog, backupChainLog.getOriginalRepositoryEntry(), asynchronous);
}
catch (RepositoryException e)
{
throw new RepositoryRestoreExeption("Repository \"" + backupChainLog.getOriginalRepositoryEntry().getName()
+ "\" was not restored", e);
}
catch (RepositoryConfigurationException e)
{
throw new RepositoryRestoreExeption("Repository \"" + backupChainLog.getOriginalRepositoryEntry().getName()
+ "\" was not restored", e);
}
} |
python | def run(self):
"""
Compile all message catalog .po files into .mo files.
Skip files that have not changed, based on the source file's mtime.
"""
# thanks to deluge guys ;)
po_dir = os.path.join(os.path.dirname(__file__), 'webant', 'translations')
print('Compiling po files from "{}"...'.format(po_dir))
for lang in os.listdir(po_dir):
sys.stdout.write("\tCompiling {}... ".format(lang))
sys.stdout.flush()
curr_lang_path = os.path.join(po_dir, lang)
for path, dirs, filenames in os.walk(curr_lang_path):
for f in filenames:
if f.endswith('.po'):
src = os.path.join(path, f)
dst = os.path.join(path, f[:-3] + ".mo")
if not os.path.exists(dst) or self.force:
msgfmt.make(src, dst)
print("ok.")
else:
src_mtime = os.stat(src)[8]
dst_mtime = os.stat(dst)[8]
if src_mtime > dst_mtime:
msgfmt.make(src, dst)
print("ok.")
else:
print("already up to date.")
print('Finished compiling translation files.') |
java | protected Object getFieldValue(String fieldname) {
ActionContext actionCtx = ActionContext.getContext();
ValueStack valueStack = actionCtx.getValueStack();
Object value = valueStack.findValue(fieldname, false);
String overwriteValue = getOverwriteValue(fieldname);
if ( overwriteValue != null ) {
return overwriteValue;
}
return value;
} |
python | def head(self, url: StrOrURL, *, allow_redirects: bool=False,
**kwargs: Any) -> '_RequestContextManager':
"""Perform HTTP HEAD request."""
return _RequestContextManager(
self._request(hdrs.METH_HEAD, url,
allow_redirects=allow_redirects,
**kwargs)) |
java | public static void removeStateParam(String name, HttpServletRequest req,
HttpServletResponse res) {
setRawCookie(name, "", req, res, false, 0);
} |
python | def load_from_path(path: str) -> Config:
"""
Load a config from a file and ensure its structure.
Writes a default if necessary
"""
data = _ensure_load(path)
if not data:
data = {}
values, should_write = _ensure_values(data)
values.update({'path': path})
config = Config(**values)
if config.signature_required:
if not os.path.exists(config.update_cert_path):
LOG.warning(
f"No signing cert is present in {config.update_cert_path}, "
"code signature checking disabled")
config = config._replace(signature_required=False)
config = config._replace(update_cert_path=DEFAULT_CERT_PATH)
if should_write:
save_to_path(path, config)
return config |
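A brief usage sketch, assuming the function above is importable; the config path is illustrative.

# Hypothetical usage: load (or create) the updater config and inspect it.
config = load_from_path('/var/lib/updater/config.json')   # path is illustrative
if not config.signature_required:
    print('running with signature checking disabled')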
java | private void initializeShowCustomBottomSheetPreference() {
Preference showCustomBottomSheetPreference =
findPreference(getString(R.string.show_custom_bottom_sheet_preference_key));
showCustomBottomSheetPreference
.setOnPreferenceClickListener(new OnPreferenceClickListener() {
@Override
public boolean onPreferenceClick(final Preference preference) {
initializeCustomBottomSheet();
customBottomSheet.show();
return true;
}
});
} |
python | def rest_delete(url, timeout, show_error=False):
'''Call rest delete method'''
try:
response = requests.delete(url, timeout=timeout)
return response
except Exception as exception:
if show_error:
print_error(exception)
return None |
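A small usage sketch for the helper above; the endpoint URL is an assumption.

# Hypothetical usage: issue a DELETE and check the outcome.
response = rest_delete('http://localhost:8080/api/v1/items/3', 20, show_error=True)
if response is not None and response.status_code == 200:
    print('deleted')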
java | private double link(double d1, double d2) {
if (isScoreMatrix) {
return Math.max(d1,d2);
} else {
return Math.min(d1,d2);
}
} |
python | def upsert(self, table, value, update_columns=None, commit=True):
"""
:type table: string
:type value: dict
:type update_columns: list
:param update_columns: the columns which will be updated if the record already exists
:type commit: bool
"""
if not isinstance(value, dict):
raise TypeError('Input value should be a dictionary')
if not update_columns:
update_columns = value.keys()
value_q, _args = self._value_parser(value, columnname=False)
_sql = ''.join(['INSERT INTO ', self._backtick(table), ' (', self._backtick_columns(value), ') VALUES ',
'(', value_q, ') ',
'ON DUPLICATE KEY UPDATE ',
', '.join(['='.join([k, 'VALUES('+k+')']) for k in update_columns]), ';'])
if self.debug:
return self.cur.mogrify(_sql, _args)
self.cur.execute(_sql, _args)
if commit:
self.conn.commit()
return self.cur.lastrowid |
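A usage sketch for the upsert helper above, assuming db is an instance of the surrounding wrapper class; the table and column names are illustrative.

# Hypothetical usage: insert a user row, or bump login_count if the key already exists.
row_id = db.upsert(
    'users',
    {'id': 7, 'name': 'alice', 'login_count': 1},
    update_columns=['login_count'],
)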
java | public static String toHex(final byte[] buffer, final int offset, final int length)
{
return new String(toHexByteArray(buffer, offset, length), UTF_8);
} |
python | def initialize_object(B, res, row):
"""
Do a shallow initialization of an object.
Arguments:
- row<dict>: dict of data at depth=1, i.e. many_refs are given only as ids
"""
B = get_backend()
field_groups = FieldGroups(B.get_concrete(res))
try:
obj = B.get_object(B.get_concrete(res), row['id'])
except B.object_missing_error(B.get_concrete(res)):
tbl = B.get_concrete(res)
obj = tbl()
# Set attributes, refs
for fname, field in field_groups['scalars'].items():
value = row.get(fname, getattr(obj, fname, None))
value = B.convert_field(obj.__class__, fname, value)
setattr(obj, fname, value)
# _debug('res, row: %s, %s', res, row)
# Already-fetched, and id-only refs
fetched, dangling = defaultdict(dict), defaultdict(set)
# To handle subrows that might be shallow (id) or deep (dict)
def _handle_subrow(R, subrow):
if isinstance(subrow, dict):
pk = subrow['id']
fetched[R][pk] = subrow
else:
pk = subrow
dangling[R].add(pk)
return pk
for fname, field in field_groups['one_refs'].items():
fieldres = _field_resource(B, B.get_concrete(res), fname)
key = field.column
subrow = row.get(key)
if subrow is None: # e.g. use "org" if "org_id" is missing
key = fname
subrow = row[key]
pk = _handle_subrow(fieldres, subrow)
setattr(obj, key, pk)
for fname, field in field_groups['many_refs'].items():
fieldres = _field_resource(B, B.get_concrete(res), fname)
pks = [
_handle_subrow(fieldres, subrow) for subrow in row.get(fname, [])
]
return obj, fetched, dangling |