language | func_code_string |
---|---|
java | public static Date getDateFromLong(long date)
{
TimeZone tz = TimeZone.getDefault();
return (new Date(date - tz.getRawOffset()));
} |
python | def search(self, pattern, minAddr = None, maxAddr = None):
"""
Search for the given pattern within the process memory.
@type pattern: str, compat.unicode or L{Pattern}
@param pattern: Pattern to search for.
It may be a byte string, a Unicode string, or an instance of
L{Pattern}.
The following L{Pattern} subclasses are provided by WinAppDbg:
- L{BytePattern}
- L{TextPattern}
- L{RegExpPattern}
- L{HexPattern}
You can also write your own subclass of L{Pattern} for customized
searches.
@type minAddr: int
@param minAddr: (Optional) Start the search at this memory address.
@type maxAddr: int
@param maxAddr: (Optional) Stop the search at this memory address.
@rtype: iterator of tuple( int, int, str )
@return: An iterator of tuples. Each tuple contains the following:
- The memory address where the pattern was found.
- The size of the data that matches the pattern.
- The data that matches the pattern.
@raise WindowsError: An error occurred when querying or reading the
process memory.
"""
if isinstance(pattern, str):
return self.search_bytes(pattern, minAddr, maxAddr)
if isinstance(pattern, compat.unicode):
return self.search_bytes(pattern.encode("utf-16le"),
minAddr, maxAddr)
if isinstance(pattern, Pattern):
return Search.search_process(self, pattern, minAddr, maxAddr)
raise TypeError("Unknown pattern type: %r" % type(pattern)) |
java | public void marshall(AuthorizationConfig authorizationConfig, ProtocolMarshaller protocolMarshaller) {
if (authorizationConfig == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(authorizationConfig.getAuthorizationType(), AUTHORIZATIONTYPE_BINDING);
protocolMarshaller.marshall(authorizationConfig.getAwsIamConfig(), AWSIAMCONFIG_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public void start() {
this.factoryTracker.open();
Dictionary<String, String> props = new Hashtable<String, String>();
props.put(Constants.APPLICATION_NAME, applicationName);
serviceRegistration = paxWicketBundleContext.registerService(PaxWicketInjector.class, this, props);
} |
java | public boolean setBranchKey(String key) {
Branch_Key = key;
String currentBranchKey = getString(KEY_BRANCH_KEY);
if (key == null || currentBranchKey == null || !currentBranchKey.equals(key)) {
clearPrefOnBranchKeyChange();
setString(KEY_BRANCH_KEY, key);
return true;
}
return false;
} |
java | @Override
public void setValue(final List<T> value) {
if (list == null && value == null) {
// fast exit
return;
}
if (list != null && list.isSameValue(value)) {
// setting the same value as the one being edited
list.refresh();
return;
}
if (list != null) {
// Having entire value reset, so dump the wrapper gracefully
list.detach();
}
if (value == null) {
list = null;
} else {
list = new ListValidationEditorWrapper<>(value, chain, editorSource, parentDriver);
list.attach();
}
} |
python | def find_resource_class(resource_path):
"""
dynamically load a class from a string
"""
class_path = ResourceTypes[resource_path]
# First prepend our __name__ to the resource string passed in.
full_path = '.'.join([__name__, class_path])
class_data = full_path.split(".")
module_path = ".".join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
# Finally, we retrieve the Class
return getattr(module, class_str) |
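The same dynamic-import pattern in isolation: split a dotted path into module and class name, import the module, then `getattr` the class. A minimal sketch using only standard-library names:

```python
import importlib

def load_class(dotted_path):
    # e.g. "collections.OrderedDict" -> <class 'collections.OrderedDict'>
    module_path, _, class_name = dotted_path.rpartition(".")
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

print(load_class("collections.OrderedDict"))  # <class 'collections.OrderedDict'>
```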
python | def aa_counts(aln, weights=None, gap_chars='-.'):
"""Calculate the amino acid frequencies in a set of SeqRecords.
Weights for each sequence in the alignment can be given as a list/tuple,
usually calculated with the sequence_weights function. For convenience, you
can also pass "weights=True" and the weights will be calculated with
sequence_weights here.
"""
if weights is None:
counts = Counter()
for rec in aln:
seq_counts = Counter(str(rec.seq))
counts.update(seq_counts)
else:
if weights is True:
# For convenience
weights = sequence_weights(aln)
else:
assert len(weights) == len(aln), (
"Length mismatch: weights = %d, alignment = %d"
% (len(weights), len(aln)))
counts = defaultdict(float)
for col in zip(*aln):
for aa, wt in zip(col, weights):
counts[aa] += wt
# Don't count gaps
for gap_char in gap_chars:
if gap_char in counts:
del counts[gap_char]
return counts |
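A minimal usage sketch of the unweighted branch above, with a stand-in record class instead of Biopython's SeqRecord (an assumption; only the `.seq` attribute is needed here):

```python
from collections import Counter

class FakeRec:  # stand-in for a Bio.SeqRecord; only .seq is used
    def __init__(self, seq):
        self.seq = seq

aln = [FakeRec("ACD-"), FakeRec("ACDE")]

# Unweighted branch: plain symbol counting, then gap removal.
counts = Counter()
for rec in aln:
    counts.update(str(rec.seq))
for gap_char in '-.':
    if gap_char in counts:
        del counts[gap_char]
print(counts)  # Counter({'A': 2, 'C': 2, 'D': 2, 'E': 1})
```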
java | public final void short_key() throws RecognitionException {
Token id=null;
try {
// src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:772:5: ({...}? =>id= ID )
// src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:772:12: {...}? =>id= ID
{
if ( !(((helper.validateIdentifierKey(DroolsSoftKeywords.SHORT)))) ) {
if (state.backtracking>0) {state.failed=true; return;}
throw new FailedPredicateException(input, "short_key", "(helper.validateIdentifierKey(DroolsSoftKeywords.SHORT))");
}
id=(Token)match(input,ID,FOLLOW_ID_in_short_key4911); if (state.failed) return;
if ( state.backtracking==0 ) { helper.emit(id, DroolsEditorType.KEYWORD); }
}
}
catch (RecognitionException re) {
throw re;
}
finally {
// do for sure before leaving
}
} |
java | public void performEpilogue(ActionRuntime runtime) { // fixed process
if (runtime.isForwardToHtml()) {
arrangeNoCacheResponseWhenJsp(runtime);
}
handleSqlCount(runtime);
handleMailCount(runtime);
handleRemoteApiCount(runtime);
clearCallbackContext();
clearPreparedAccessContext();
} |
java | public Map<TimestampMode, List<String>> getDefaultTimestampModes() {
Map<TimestampMode, List<String>> result = new HashMap<TimestampMode, List<String>>();
for (String resourcetype : m_defaultTimestampModes.keySet()) {
TimestampMode mode = m_defaultTimestampModes.get(resourcetype);
if (result.containsKey(mode)) {
result.get(mode).add(resourcetype);
} else {
List<String> list = new ArrayList<String>();
list.add(resourcetype);
result.put(mode, list);
}
}
return result;
} |
python | def allele_plot(file, normalize=False, alleles=None, generations=None):
"""Plot the alleles from each generation from the individuals file.
This function creates a plot of the individual allele values as they
change through the generations. It creates three subplots, one for each
of the best, median, and average individual. The best and median
individuals are chosen using the fitness data for each generation. The
average individual, on the other hand, is actually an individual created
by averaging the alleles within a generation. This function requires the
matplotlib library.
.. note::
This function only works for single-objective problems.
.. figure:: _static/allele_plot.png
:alt: Example allele plot
:align: center
An example image saved from the ``allele_plot`` function.
Arguments:
- *file* -- the path to the individuals file produced by the file_observer
- *normalize* -- Boolean value stating whether allele values should be
normalized before plotting (default False)
- *alleles* -- a list of allele index values that should be plotted
(default None)
- *generations* -- a list of generation numbers that should be plotted
(default None)
If *alleles* is ``None``, then all alleles are plotted. Similarly, if
*generations* is ``None``, then all generations are plotted.
"""
import matplotlib.pyplot as plt
import numpy as np
generation_data = []
reader = csv.reader(open(file))
for row in reader:
g = int(row[0])
row[3] = row[3].replace('[', '')
row[-1] = row[-1].replace(']', '')
individual = [float(r) for r in row[3:]]
individual.append(float(row[2]))
try:
generation_data[g]
except IndexError:
generation_data.append([])
generation_data[g].append(individual)
for gen in generation_data:
gen.sort(key=lambda x: x[-1])
for j, g in enumerate(gen):
gen[j] = g[:-1]
best = []
median = []
average = []
for gen in generation_data:
best.append(gen[0])
plen = len(gen)
if plen % 2 == 1:
med = gen[(plen - 1) // 2]
else:
med = []
for a, b in zip(gen[plen // 2 - 1], gen[plen // 2]):
med.append(float(a + b) / 2)
median.append(med)
avg = [0] * len(gen[0])
for individual in gen:
for i, allele in enumerate(individual):
avg[i] += allele
for i, a in enumerate(avg):
avg[i] /= float(len(gen))
average.append(avg)
for plot_num, (data, title) in enumerate(zip([best, median, average],
["Best", "Median", "Average"])):
if alleles is None:
alleles = list(range(len(data[0])))
if generations is None:
generations = list(range(len(data)))
if normalize:
columns = list(zip(*data))
max_col = [max(c) for c in columns]
min_col = [min(c) for c in columns]
for dat in data:
for i, d in enumerate(dat):
dat[i] = (d - min_col[i]) / float(max_col[i] - min_col[i])
plot_data = []
for g in generations:
plot_data.append([data[g][a] for a in alleles])
sub = plt.subplot(3, 1, plot_num + 1)
plt.pcolor(np.array(plot_data))
plt.colorbar()
step_size = max(len(generations) // 7, 1)
ytick_locs = list(range(step_size, len(generations), step_size))
ytick_labs = generations[step_size::step_size]
plt.yticks(ytick_locs, ytick_labs)
plt.ylabel('Generation')
if plot_num == 2:
xtick_locs = list(range(len(alleles)))
xtick_labs = alleles
plt.xticks(xtick_locs, xtick_labs)
plt.xlabel('Allele')
else:
plt.setp(sub.get_xticklabels(), visible=False)
plt.title(title)
plt.show() |
python | def fast_spearman(x, y=None, destination=None):
"""calculate the spearman correlation matrix for the columns of x (with dimensions MxN), or optionally, the spearman correlaton
matrix between the columns of x and the columns of y (with dimensions OxP). If destination is provided, put the results there.
In the language of statistics the columns are the variables and the rows are the observations.
Args:
x (numpy array-like) MxN in shape
y (optional, numpy array-like) OxP in shape. M (# rows in x) must equal O (# rows in y)
destination (numpy array-like) optional location where to store the results as they are calculated (e.g. a numpy
memmap of a file)
returns:
(numpy array-like) array of the covariance values
for defaults (y=None), shape is NxN
if y is provided, shape is NxP
"""
logger.debug("x.shape: {}".format(x.shape))
if hasattr(y, "shape"):
logger.debug("y.shape: {}".format(y.shape))
x_ranks = pandas.DataFrame(x).rank(method="average").values
logger.debug("some min and max ranks of x_ranks:\n{}\n{}".format(numpy.min(x_ranks[:10], axis=0), numpy.max(x_ranks[:10], axis=0)))
y_ranks = pandas.DataFrame(y).rank(method="average").values if y is not None else None
return fast_corr(x_ranks, y_ranks, destination) |
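The trick above is the standard reduction of Spearman correlation to Pearson correlation on ranks. A self-contained sketch of the same idea, with numpy's `corrcoef` standing in for the module's `fast_corr` helper (an assumption; `fast_corr` is not shown here):

```python
import numpy as np
import pandas as pd

x = np.array([[1.0, 2.0],
              [4.0, 1.0],
              [9.0, 7.0],
              [16.0, 3.0]])

# Spearman correlation = Pearson correlation of the average-method ranks.
x_ranks = pd.DataFrame(x).rank(method="average").values
print(np.corrcoef(x_ranks, rowvar=False))  # NxN (here 2x2) matrix
```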
java | public static Concept deserialiseConcept(String s) {
if(unmarshaller == null) init();
try {
Object res = unmarshaller.unmarshal(new ByteArrayInputStream(s.getBytes()));
return (Concept) res;
} catch(JAXBException e) {
log.error("There was a problem deserialising a concept. JAXB threw an exception.", e);
throw new RuntimeException(e);
}
} |
java | @Override
public ExportBundleResult exportBundle(ExportBundleRequest request) {
request = beforeClientExecution(request);
return executeExportBundle(request);
} |
java | public static Dynamic ofField(FieldDescription.InDefinedShape fieldDescription) {
if (!fieldDescription.isStatic() || !fieldDescription.isFinal()) {
throw new IllegalArgumentException("Field must be static and final: " + fieldDescription);
}
boolean selfDeclared = fieldDescription.getType().isPrimitive()
? fieldDescription.getType().asErasure().asBoxed().equals(fieldDescription.getType().asErasure())
: fieldDescription.getDeclaringType().equals(fieldDescription.getType().asErasure());
return new Dynamic(new ConstantDynamic(fieldDescription.getInternalName(),
fieldDescription.getDescriptor(),
new Handle(Opcodes.H_INVOKESTATIC,
CONSTANT_BOOTSTRAPS,
"getStaticFinal",
selfDeclared
? "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/Class;)Ljava/lang/Object;"
: "(Ljava/lang/invoke/MethodHandles$Lookup;Ljava/lang/String;Ljava/lang/Class;Ljava/lang/Class;)Ljava/lang/Object;",
false), selfDeclared
? new Object[0]
: new Object[]{Type.getType(fieldDescription.getDeclaringType().getDescriptor())}), fieldDescription.getType().asErasure());
} |
python | def genty_dataprovider(builder_function):
"""Decorator defining that this test gets parameters from the given
build_function.
:param builder_function:
A callable that returns parameters that will be passed to the method
decorated by this decorator.
If the builder_function returns a tuple or list, then that will be
passed as *args to the decorated method.
If the builder_function returns a :class:`GentyArgs`, then that will
be used to pass *args and **kwargs to the decorated method.
Any other return value will be treated as a single parameter, and
passed as such to the decorated method.
:type builder_function:
`callable`
"""
datasets = getattr(builder_function, 'genty_datasets', {None: ()})
def wrap(test_method):
# Save the data providers in the test method. This data will be
# consumed by the @genty decorator.
if not hasattr(test_method, 'genty_dataproviders'):
test_method.genty_dataproviders = []
test_method.genty_dataproviders.append(
(builder_function, datasets),
)
return test_method
return wrap |
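A usage sketch, assuming the public genty API (`@genty`, `@genty_dataset`) that this decorator is documented to work alongside:

```python
import unittest
from genty import genty, genty_dataset, genty_dataprovider

@genty_dataset((2, 3), (10, 5))
def build_args(a, b):
    return a, b, a + b  # tuple -> passed as *args to the test

@genty
class AdditionTest(unittest.TestCase):
    @genty_dataprovider(build_args)
    def test_sum(self, a, b, expected):
        self.assertEqual(a + b, expected)
```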
python | def postprocess_keyevent(self, event):
"""Process keypress event"""
ShellBaseWidget.postprocess_keyevent(self, event)
if QToolTip.isVisible():
_event, _text, key, _ctrl, _shift = restore_keyevent(event)
self.hide_tooltip_if_necessary(key) |
python | def list_quantum_computers(connection: ForestConnection = None,
qpus: bool = True,
qvms: bool = True) -> List[str]:
"""
List the names of available quantum computers
:param connection: An optional :py:class:`ForestConnection` object. If not specified,
the default values for URL endpoints will be used, and your API key
will be read from ~/.pyquil_config. If you deign to change any
of these parameters, pass your own :py:class:`ForestConnection` object.
:param qpus: Whether to include QPUs in the list.
:param qvms: Whether to include QVMs in the list.
"""
if connection is None:
connection = ForestConnection()
qc_names: List[str] = []
if qpus:
qc_names += list(list_lattices(connection=connection).keys())
if qvms:
qc_names += ['9q-square-qvm', '9q-square-noisy-qvm']
return qc_names |
java | public static <Vertex> BiDiNavigator<SCComponent<Vertex>> getSccBiDiNavigator() {
return
new BiDiNavigator<SCComponent<Vertex>>() {
public List<SCComponent<Vertex>> next(SCComponent<Vertex> scc) {
return scc.next();
}
public List<SCComponent<Vertex>> prev(SCComponent<Vertex> scc) {
return scc.prev();
}
};
} |
java | public static boolean isEmptyAll(String... args) {
for (String arg : args) {
if (!isEmpty(arg)) {
return false;
}
}
return true;
} |
python | def _sanitise_list(arg_list):
"""A generator for iterating through a list of gpg options and sanitising
them.
:param list arg_list: A list of options and flags for GnuPG.
:rtype: generator
:returns: A generator whose next() method returns each of the items in
``arg_list`` after calling ``_sanitise()`` with that item as a
parameter.
"""
if isinstance(arg_list, list):
for arg in arg_list:
safe_arg = _sanitise(arg)
if safe_arg != "":
yield safe_arg |
python | def _get_tau(self, C, mag):
"""
Returns magnitude dependent inter-event standard deviation (tau)
(equation 14)
"""
if mag < 6.5:
return C["tau1"]
elif mag < 7.:
return C["tau1"] + (C["tau2"] - C["tau1"]) * ((mag - 6.5) / 0.5)
else:
return C["tau2"] |
java | private static int extractInt(byte[] arr, int offset) {
return ((arr[offset] & 0xFF) << 24) | ((arr[offset + 1] & 0xFF) << 16) | ((arr[offset + 2] & 0xFF) << 8)
| (arr[offset + 3] & 0xFF);
} |
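The Java above reassembles a big-endian signed 32-bit integer from four bytes; for comparison, the equivalent read in Python via the struct module:

```python
import struct

arr = bytes([0x12, 0x34, 0x56, 0x78])
# Big-endian signed 32-bit read, same as the masked shifts in extractInt.
(value,) = struct.unpack_from(">i", arr, 0)
print(hex(value))  # 0x12345678
```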
python | def create_method(self):
"""
Build the estimator method or function.
Returns
-------
:return : string
The built method as string.
"""
n_indents = 0 if self.target_language in ['c', 'go'] else 1
method_type = 'separated.{}.method'.format(self.prefix)
method_temp = self.temp(method_type, n_indents=n_indents,
skipping=True)
return method_temp.format(**self.__dict__) |
python | def mod_aggregate(low, chunks, running):
'''
The mod_aggregate function which looks up all packages in the available
low chunks and merges them into a single pkgs ref in the present low data
'''
pkgs = []
pkg_type = None
agg_enabled = [
'installed',
'removed',
]
if low.get('fun') not in agg_enabled:
return low
for chunk in chunks:
tag = __utils__['state.gen_tag'](chunk)
if tag in running:
# Already ran the pkg state, skip aggregation
continue
if chunk.get('state') == 'pip':
if '__agg__' in chunk:
continue
# Check for the same function
if chunk.get('fun') != low.get('fun'):
continue
# Check first if 'sources' was passed so we don't aggregate pkgs
# and sources together.
if pkg_type is None:
pkg_type = 'pkgs'
if pkg_type == 'pkgs':
# Pull out the pkg names!
if 'pkgs' in chunk:
pkgs.extend(chunk['pkgs'])
chunk['__agg__'] = True
elif 'name' in chunk:
version = chunk.pop('version', None)
if version is not None:
pkgs.append({chunk['name']: version})
else:
pkgs.append(chunk['name'])
chunk['__agg__'] = True
if pkg_type is not None and pkgs:
if pkg_type in low:
low[pkg_type].extend(pkgs)
else:
low[pkg_type] = pkgs
return low |
java | private void checkIndices(int row, int col, boolean expand) {
if (row < 0 || col < 0) {
throw new ArrayIndexOutOfBoundsException();
}
if (expand) {
int r = row + 1;
int cur = 0;
while (r > (cur = rows.get()) && !rows.compareAndSet(cur, r))
;
int c = col + 1;
cur = 0;
while (c > (cur = cols.get()) && !cols.compareAndSet(cur, c))
;
}
} |
python | def wait_for_tasks(self, raise_if_error=True):
"""
Wait for the running tasks lauched from the sessions.
Note that it also wait for tasks that are started from other tasks
callbacks, like on_finished.
:param raise_if_error: if True, raise all possible encountered
errors using :class:`TaskErrors`. Else the errors are returned
as a list.
"""
errors = []
tasks_seen = TaskCache()
while True:
for session in self.values():
errs = session.wait_for_tasks(raise_if_error=False)
errors.extend(errs)
# look for tasks created after the wait (in callbacks of
# tasks from different sessions)
tasks = []
for session in self.values():
tasks.extend(session.tasks())
# if none, then just break - else loop to wait for them
if not any(t for t in tasks if t not in tasks_seen):
break
if raise_if_error and errors:
raise TaskErrors(errors)
return errors |
python | def _check_types(func_name, types, func_args, defaults):
"""Make sure that enough types were given to ensure conversion. Also remove
potential Self/Class arguments.
Args:
func_name: name of the decorated function
types: a list of Python types to which the argument will be converted
func_args: list of function arguments name
defaults: tuple of default values for the function argument
Raises:
ParseThisError: if the number of types for conversion does not match
the number of function's arguments
"""
defaults = defaults or []
if len(types) > len(func_args):
raise ParseThisError("To many types provided for conversion for '{}'."
.format(func_name))
if len(types) < len(func_args) - len(defaults):
raise ParseThisError("Not enough types provided for conversion for '{}'"
.format(func_name))
if types and types[0] in [Self, Class]:
types = types[1:]
func_args = func_args[1:]
return (types, func_args) |
java | public synchronized boolean contains(EvidenceType type, Confidence confidence) {
if (null == type) {
return false;
}
final Set<Evidence> col;
switch (type) {
case VENDOR:
col = vendors;
break;
case PRODUCT:
col = products;
break;
case VERSION:
col = versions;
break;
default:
return false;
}
for (Evidence e : col) {
if (e.getConfidence().equals(confidence)) {
return true;
}
}
return false;
} |
java | @Override
public VALUE get(KEY key) {
return key != null ? getMap().get(key) : null;
} |
java | public Integer extractArchiveDateCount(String datePattern) {
int dateCount = 0;
String[] splits = datePattern.split("d|M|y");
if (splits.length > 0) {
dateCount = dateCount + Integer.valueOf(splits[0]);
}
if (splits.length > 1) {
dateCount = dateCount + (Integer.valueOf(splits[1]) * 30);
}
if (splits.length > 2) {
dateCount = dateCount + (Integer.valueOf(splits[2]) * 365);
}
return dateCount;
} |
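The method above approximates a date pattern such as "3d2M1y" as a day count (months counted as 30 days, years as 365). A Python sketch of the same arithmetic, assuming the `<days>d<months>M<years>y` ordering implied by the split:

```python
import re

def archive_date_count(date_pattern):
    # Split on the d/M/y markers and weight the pieces as days/months/years.
    parts = [p for p in re.split(r"[dMy]", date_pattern) if p]
    weights = (1, 30, 365)
    return sum(int(p) * w for p, w in zip(parts, weights))

print(archive_date_count("3d2M1y"))  # 3 + 2*30 + 1*365 = 428
```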
python | def async_send(self, url, data, headers, success_cb, failure_cb):
"""
Spawn an async request to a remote webserver.
"""
# this can be optimized by making a custom self.send that does not
# read the response since we don't use it.
self._lock.acquire()
return gevent.spawn(
super(GeventedHTTPTransport, self).send, url, data, headers
).link(lambda x: self._done(x, success_cb, failure_cb)) |
java | public Observable<RouteFilterRuleInner> updateAsync(String resourceGroupName, String routeFilterName, String ruleName, PatchRouteFilterRule routeFilterRuleParameters) {
return updateWithServiceResponseAsync(resourceGroupName, routeFilterName, ruleName, routeFilterRuleParameters).map(new Func1<ServiceResponse<RouteFilterRuleInner>, RouteFilterRuleInner>() {
@Override
public RouteFilterRuleInner call(ServiceResponse<RouteFilterRuleInner> response) {
return response.body();
}
});
} |
java | public void setAvailablePolicyTypes(java.util.Collection<PolicyTypeSummary> availablePolicyTypes) {
if (availablePolicyTypes == null) {
this.availablePolicyTypes = null;
return;
}
this.availablePolicyTypes = new java.util.ArrayList<PolicyTypeSummary>(availablePolicyTypes);
} |
python | def create_version(self, version_label):
'''
method to create a new version of the resource as it currently stands
- Note: this will create a version based on the current live instance of the resource,
not the local version, which might require self.update() to update.
Args:
version_label (str): label to be used for version
Returns:
(ResourceVersion): instance of ResourceVersion, also appended to self.versions
'''
# create version
version_response = self.repo.api.http_request('POST', '%s/fcr:versions' % self.uri, data=None, headers={'Slug':version_label})
# if 201, assume success
if version_response.status_code == 201:
logger.debug('version created: %s' % version_response.headers['Location'])
# affix version
self._affix_version(version_response.headers['Location'], version_label) |
java | @Override
public java.util.List<com.liferay.commerce.product.model.CProduct> getCProducts(
int start, int end) {
return _cProductLocalService.getCProducts(start, end);
} |
java | private void addInitialFactPattern( final GroupElement subrule ) {
// creates a pattern for initial fact
final Pattern pattern = new Pattern( 0,
ClassObjectType.InitialFact_ObjectType );
// adds the pattern as the first child of the given AND group element
subrule.addChild( 0,
pattern );
} |
java | protected void runAsyncWithoutFencing(Runnable runnable) {
if (rpcServer instanceof FencedMainThreadExecutable) {
((FencedMainThreadExecutable) rpcServer).runAsyncWithoutFencing(runnable);
} else {
throw new RuntimeException("FencedRpcEndpoint has not been started with a FencedMainThreadExecutable RpcServer.");
}
} |
python | def range(self, key, size=None, unique=True):
"""Returns a generator of nodes' configuration available
in the continuum/ring.
:param key: the key to look for.
:param size: limit the list to at most this number of nodes.
:param unique: a node may only appear once in the list (default True).
"""
all_nodes = set()
if unique:
size = size or len(self.runtime._nodes)
else:
all_nodes = []
pos = self._get_pos(key)
for key in self.runtime._keys[pos:]:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break
else:
for i, key in enumerate(self.runtime._keys):
if i < pos:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break |
python | def _create_m_objective(w, X):
"""
Creates an objective function and its derivative for M, given W and X
Args:
w (array): clusters x cells
X (array): genes x cells
"""
clusters, cells = w.shape
genes = X.shape[0]
w_sum = w.sum(1)
def objective(m):
m = m.reshape((X.shape[0], w.shape[0]))
d = m.dot(w)+eps
temp = X/d
w2 = w.dot(temp.T)
deriv = w_sum - w2.T
return np.sum(d - X*np.log(d))/genes, deriv.flatten()/genes
return objective |
java | public ArrayList<OvhRadiusConnectionLog> serviceName_radiusConnectionLogs_GET(String serviceName) throws IOException {
String qPath = "/xdsl/{serviceName}/radiusConnectionLogs";
StringBuilder sb = path(qPath, serviceName);
String resp = exec(qPath, "GET", sb.toString(), null);
return convertTo(resp, t17);
} |
python | def parse_coach_go(infile):
"""Parse a GO output file from COACH and return a rank-ordered list of GO term predictions
The columns in all files are: GO terms, Confidence score, Name of GO terms. The files are:
- GO_MF.dat - GO terms in 'molecular function'
- GO_BP.dat - GO terms in 'biological process'
- GO_CC.dat - GO terms in 'cellular component'
Args:
infile (str): Path to any COACH GO prediction file
Returns:
list: A rank-ordered list of dicts, one per GO prediction, with the keys defined below
- ``go_id``: GO term ID
- ``go_term``: GO term text
- ``c_score``: confidence score of the GO prediction
"""
go_list = []
with open(infile) as go_file:
for line in go_file.readlines():
go_dict = {}
go_split = line.split()
go_dict['go_id'] = go_split[0]
go_dict['c_score'] = go_split[1]
go_dict['go_term'] = ' '.join(go_split[2:])
go_list.append(go_dict)
return go_list |
java | public static Class<?> erase(Type type)
{
if (type instanceof Class<?>)
{
return (Class<?>) type;
}
else if (type instanceof ParameterizedType)
{
return (Class<?>) ((ParameterizedType) type).getRawType();
}
else if (type instanceof TypeVariable<?>)
{
TypeVariable<?> tv = (TypeVariable<?>) type;
if (tv.getBounds().length == 0)
return Object.class;
else
return erase(tv.getBounds()[0]);
}
else if (type instanceof GenericArrayType)
{
GenericArrayType aType = (GenericArrayType) type;
return GenericArrayTypeImpl.createArrayType(erase(aType.getGenericComponentType()));
}
else
{
// TODO at least support CaptureType here
throw new RuntimeException("not supported: " + type.getClass());
}
} |
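For comparison, Python's typing module offers an analogous "erasure" for parameterized generics: `get_origin` recovers the raw runtime class from a generic alias. A small illustrative sketch:

```python
from typing import List, get_origin

print(get_origin(List[int]))  # <class 'list'> -- the "erased" raw type
```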
java | public CMASpace fetchOne(String spaceId) {
assertNotNull(spaceId, "spaceId");
return service.fetchOne(spaceId).blockingFirst();
} |
python | def export(self, name, columns, points):
"""Write the points in RabbitMQ."""
data = ('hostname=' + self.hostname + ', name=' + name +
', dateinfo=' + datetime.datetime.utcnow().isoformat())
for column, point in zip(columns, points):
if isinstance(point, Number):
data += ", " + column + "=" + str(point)
logger.debug(data)
try:
self.client.basic_publish(exchange='', routing_key=self.queue, body=data)
except Exception as e:
logger.error("Can not export stats to RabbitMQ (%s)" % e) |
python | def expand(self, id):
""" Expand a concept or collection to all it's narrower concepts.
If the id passed belongs to a :class:`skosprovider.skos.Concept`,
the id of the concept itself should be include in the return value.
:param str id: A concept or collection id.
:returns: A :class:`lst` of id's. Returns false if the input id does not exists
"""
query = """SELECT DISTINCT ?Id{
{
?Subject dc:identifier ?Id; skos:inScheme %s:; gvp:broaderExtended %s;.
}
UNION
{
VALUES ?Id {'%s'}
?Subject dc:identifier ?Id; skos:inScheme %s:; rdf:type skos:Concept.
}
}
""" % (self.vocab_id, self.vocab_id + ":" + id, id, self.vocab_id)
print (query)
res = self.session.get(self.base_url + "sparql.json", params={"query": query})
res.encoding = 'utf-8'
r = res.json()
result = [result['Id']['value'] for result in r['results']['bindings']]
if len(result) == 0 and self.get_by_id(id) is False:
return False
return result |
python | def Param(name, value=None, unit=None, ucd=None, dataType=None, utype=None,
ac=True):
"""
'Parameter', used as a general purpose key-value entry in the 'What' section.
May be assembled into a :class:`Group`.
NB ``name`` is not mandated by schema, but *is* mandated in full spec.
Args:
value(str): String representing parameter value.
Or, if ``ac`` is true, then 'autoconversion' is attempted, in which case
``value`` can also be an instance of one of the following:
* :py:obj:`bool`
* :py:obj:`int`
* :py:obj:`float`
* :py:class:`datetime.datetime`
This allows you to create Params without littering your code
with string casts, or worrying if the passed value is a float or a
string, etc.
NB the value is always *stored* as a string representation,
as per VO spec.
unit(str): Units of value. See :class:`.definitions.units`
ucd(str): `unified content descriptor <http://arxiv.org/abs/1110.0525>`_.
For a list of valid UCDs, see:
http://vocabularies.referata.com/wiki/Category:IVOA_UCD.
dataType(str): Denotes type of ``value``; restricted to 3 options:
``string`` (default), ``int`` , or ``float``.
(NB *not* to be confused with standard XML Datatypes, which have many
more possible values.)
utype(str): See http://wiki.ivoa.net/twiki/bin/view/IVOA/Utypes
ac(bool): Attempt automatic conversion of passed ``value`` to string,
and set ``dataType`` accordingly (only attempted if ``dataType``
is the default, i.e. ``None``).
(NB only supports types listed in _datatypes_autoconversion dict)
"""
# We use locals() to allow concise looping over the arguments.
atts = locals()
atts.pop('ac')
# Drop any arguments that were left as None.
for k in list(atts):
if atts[k] is None:
del atts[k]
if (ac
and value is not None
and (not isinstance(value, string_types))
and dataType is None
):
if type(value) in _datatypes_autoconversion:
datatype, func = _datatypes_autoconversion[type(value)]
atts['dataType'] = datatype
atts['value'] = func(value)
return objectify.Element('Param', attrib=atts) |
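A hedged usage sketch of the autoconversion path: with `ac=True` (the default), a non-string value should be converted and `dataType` filled in from the `_datatypes_autoconversion` table. The expected outputs below follow the docstring and are not verified against the package:

```python
# Expected behavior per the docstring above (not verified against the package):
p = Param(name="peak_flux", value=0.0015, unit="Jy")
print(p.attrib.get("dataType"))  # 'float' -- inferred because value was a float
print(p.attrib.get("value"))     # string representation, per the VO spec

p2 = Param(name="alert_id", value="GRB170817A")
print(p2.attrib.get("dataType"))  # None: plain strings keep the default type
```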
java | public void marshall(CreateEndpointConfigRequest createEndpointConfigRequest, ProtocolMarshaller protocolMarshaller) {
if (createEndpointConfigRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(createEndpointConfigRequest.getEndpointConfigName(), ENDPOINTCONFIGNAME_BINDING);
protocolMarshaller.marshall(createEndpointConfigRequest.getProductionVariants(), PRODUCTIONVARIANTS_BINDING);
protocolMarshaller.marshall(createEndpointConfigRequest.getTags(), TAGS_BINDING);
protocolMarshaller.marshall(createEndpointConfigRequest.getKmsKeyId(), KMSKEYID_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | protected Element createGroup(String namespace, Object parent, Object group, String type) {
Element parentElement;
if (parent == null) {
parentElement = getRootElement();
} else {
parentElement = getGroup(parent);
}
if (parentElement == null) {
return null;
} else {
Element element;
String id = Dom.createUniqueId();
if (group instanceof PaintableGroup) {
id = Dom.assembleId(id, ((PaintableGroup) group).getGroupName());
}
groupToId.put(group, id);
idToGroup.put(id, group);
if (Dom.NS_HTML.equals(namespace)) {
element = Dom.createElement("div", id);
} else {
element = Dom.createElementNS(namespace, type, id);
}
parentElement.appendChild(element);
return element;
}
} |
python | def raw_custom_event(sender, event_name,
payload,
user,
send_hook_meta=True,
instance=None,
**kwargs):
"""
Give a full payload
"""
HookModel = get_hook_model()
hooks = HookModel.objects.filter(user=user, event=event_name)
for hook in hooks:
new_payload = payload
if send_hook_meta:
new_payload = {
'hook': hook.dict(),
'data': payload
}
hook.deliver_hook(instance, payload_override=new_payload) |
java | public int install(int appId, int spaceId) {
return getResourceFactory()
.getApiResource("/app/" + appId + "/install")
.entity(new ApplicationInstall(spaceId),
MediaType.APPLICATION_JSON_TYPE)
.post(ApplicationCreateResponse.class).getId();
} |
python | def jac_uniform(X, cells):
"""The approximated Jacobian is
partial_i E = 2/(d+1) (x_i int_{omega_i} rho(x) dx - int_{omega_i} x rho(x) dx)
= 2/(d+1) sum_{tau_j in omega_i} (x_i - b_{j, rho}) int_{tau_j} rho,
see Chen-Holst. This method here assumes uniform density, rho(x) = 1, such that
partial_i E = 2/(d+1) sum_{tau_j in omega_i} (x_i - b_j) |tau_j|
with b_j being the ordinary barycenter.
"""
dim = 2
mesh = MeshTri(X, cells)
jac = numpy.zeros(X.shape)
for k in range(mesh.cells["nodes"].shape[1]):
i = mesh.cells["nodes"][:, k]
fastfunc.add.at(
jac,
i,
((mesh.node_coords[i] - mesh.cell_barycenters).T * mesh.cell_volumes).T,
)
return 2 / (dim + 1) * jac |
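The docstring's gradient, rendered in LaTeX for readability (uniform-density case, with b_j the barycenter and |tau_j| the volume of simplex tau_j):

```latex
\partial_i E = \frac{2}{d+1} \sum_{\tau_j \in \omega_i} \left(x_i - b_j\right)\,\lvert\tau_j\rvert
```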
java | public static HttpUrl presignV4(Request request, String region, String accessKey, String secretKey, int expires)
throws NoSuchAlgorithmException, InvalidKeyException {
String contentSha256 = "UNSIGNED-PAYLOAD";
DateTime date = DateFormat.AMZ_DATE_FORMAT.parseDateTime(request.header("x-amz-date"));
Signer signer = new Signer(request, contentSha256, date, region, accessKey, secretKey, null);
signer.setScope();
signer.setPresignCanonicalRequest(expires);
signer.setStringToSign();
signer.setSigningKey();
signer.setSignature();
return signer.url.newBuilder()
.addEncodedQueryParameter(S3Escaper.encode("X-Amz-Signature"), S3Escaper.encode(signer.signature))
.build();
} |
java | public void writeSmallShortArray( short[] values ) {
int byteSize = values.length * 2 + 1;
this.addUnsignedByte( ( short ) values.length );
doWriteShortArray( values, byteSize );
} |
java | public void writeMBeanQuery(OutputStream out, MBeanQuery value) throws IOException {
writeStartObject(out);
writeObjectNameField(out, OM_OBJECTNAME, value.objectName);
// TODO: Produce proper JSON for QueryExp?
writeSerializedField(out, OM_QUERYEXP, value.queryExp);
writeStringField(out, OM_CLASSNAME, value.className);
writeEndObject(out);
} |
python | def _update_with_merge_lists(self, other, key, val=None, **options):
"""
Similar to _update_with_merge but merge lists always.
:param self: mapping object to update with 'other'
:param other: mapping object to update 'self'
:param key: key of mapping object to update
:param val: value to update self alternatively
:return: None but 'self' will be updated
"""
_update_with_merge(self, other, key, val=val, merge_lists=True, **options) |
java | @Override
public Object eGet(int featureID, boolean resolve, boolean coreType) {
switch (featureID) {
case AfplibPackage.CFI__FIXED_LENGTH_RG:
return getFixedLengthRG();
}
return super.eGet(featureID, resolve, coreType);
} |
python | def cli(wio):
'''
Login with your Wio account.
\b
DOES:
Login and save an access token for interacting with your account on the Wio.
\b
USE:
wio login
'''
mserver = wio.config.get("mserver", None)
if mserver:
click.echo(click.style('> ', fg='green') + "Current server is: " +
click.style(mserver, fg='green'))
if click.confirm(click.style('Would you like to log in with a different server?', bold=True), default=False):
choise_server(wio)
else:
choise_server(wio)
if wio.config.get("server") == 'Customize':
email = click.prompt(click.style('? ', fg='green') +
click.style('Please enter your email address', bold=True), type=str)
password = click.prompt(click.style('? ', fg='green') +
click.style('Please enter your password', bold=True), hide_input=True, type=str)
server_url = wio.config.get("mserver")
thread = termui.waiting_echo("Sending login details...")
thread.daemon = True
thread.start()
try:
json_response = login_wio(server_url, email, password)
token = json_response['token']
except Exception as e:
thread.stop('')
thread.join()
click.secho(">> %s" %e, fg='red')
return
else:
token = click.prompt(click.style('? ', fg='green') +
click.style('First get wio user token from ', bold=True) +
click.style('https://wio.seeed.io/login', bold=True, fg='green') +
click.style('\n? ', fg='green') +
click.style('Then enter token', bold=True), type=str)
email = ''
thread = termui.waiting_echo("Checking validity of token...")
thread.daemon = True
thread.start()
try:
check_token(wio.config.get("mserver"), token)
except Exception as e:
thread.stop('')
thread.join()
click.secho(">> %s" %e, fg='red')
return
# res = login_seeed(email, password) #TODO(ten): next time delete
# token = res['data']['token']
# user_id = res['data']['userid']
# ext_user(wio.config.get("mserver"), email, user_id, token)
wio.set_config('email', email)
wio.set_config('token', token)
thread.stop('')
thread.join()
click.secho("\r> ", fg='green', nl=False)
click.echo("Successfully completed login! Check state, see 'wio state'") |
java | public Observable<List<ExplicitListItem>> getExplicitListAsync(UUID appId, String versionId, UUID entityId) {
return getExplicitListWithServiceResponseAsync(appId, versionId, entityId).map(new Func1<ServiceResponse<List<ExplicitListItem>>, List<ExplicitListItem>>() {
@Override
public List<ExplicitListItem> call(ServiceResponse<List<ExplicitListItem>> response) {
return response.body();
}
});
} |
python | def update_warning(self):
"""
Updates the icon and tip based on the validity of the array content.
"""
widget = self._button_warning
if not self.is_valid():
tip = _('Array dimensions not valid')
widget.setIcon(ima.icon('MessageBoxWarning'))
widget.setToolTip(tip)
QToolTip.showText(self._widget.mapToGlobal(QPoint(0, 5)), tip)
else:
self._button_warning.setToolTip('') |
python | def update_dict(self, base: Dict[str, LinkItem], new: Dict[str, LinkItem]):
"""
Used for updating save-state dicts and getting the new save-state dict. Provides a debug option at info level.
Updates the base dict in place; basically executes `base.update(new)`.
:param base: base dict **gets overridden!**
:type base: Dict[str, ~unidown.plugin.link_item.LinkItem]
:param new: data which updates the base
:type new: Dict[str, ~unidown.plugin.link_item.LinkItem]
"""
if logging.INFO >= logging.getLevelName(dynamic_data.LOG_LEVEL): # TODO: logging here or outside
for link, item in new.items():
if link in base:
self.log.info('Actualize item: ' + link + ' | ' + str(base[link]) + ' -> ' + str(item))
base.update(new) |
java | public AsyncContext startAsync(ServletRequest servletRequest,
ServletResponse servletResponse)
throws IllegalStateException {
return request.startAsync(servletRequest, servletResponse);
} |
java | public Buffer copyTo(Buffer out, long offset, long byteCount) {
if (out == null) throw new IllegalArgumentException("out == null");
checkOffsetAndCount(size, offset, byteCount);
if (byteCount == 0) return this;
out.size += byteCount;
// Skip segments that we aren't copying from.
Segment s = head;
for (; offset >= (s.limit - s.pos); s = s.next) {
offset -= (s.limit - s.pos);
}
// Copy one segment at a time.
for (; byteCount > 0; s = s.next) {
Segment copy = new Segment(s);
copy.pos += offset;
copy.limit = Math.min(copy.pos + (int) byteCount, copy.limit);
if (out.head == null) {
out.head = copy.next = copy.prev = copy;
} else {
out.head.prev.push(copy);
}
byteCount -= copy.limit - copy.pos;
offset = 0;
}
return this;
} |
java | public static int compareTo(
ReadableInstant start, ReadableInstant end, ReadableInstant instant) {
if (instant.isBefore(start)) {
return 1;
}
if (end.isAfter(instant)) {
return 0;
}
return -1;
} |
python | def get_all_events(self):
"""Make a list of all events in the TRIPS EKB.
The events are stored in self.all_events.
"""
self.all_events = {}
events = self.tree.findall('EVENT')
events += self.tree.findall('CC')
for e in events:
event_id = e.attrib['id']
if event_id in self._static_events:
continue
event_type = e.find('type').text
try:
self.all_events[event_type].append(event_id)
except KeyError:
self.all_events[event_type] = [event_id] |
java | public static <T extends ImageGray<T>>
DenseOpticalFlow<T> region( @Nullable ConfigOpticalFlowBlockPyramid config , Class<T> imageType )
{
if( config == null )
config = new ConfigOpticalFlowBlockPyramid();
DenseOpticalFlowBlockPyramid<T> alg;
if( imageType == GrayU8.class )
alg = (DenseOpticalFlowBlockPyramid)new DenseOpticalFlowBlockPyramid.U8(
config.searchRadius,config.regionRadius,config.maxPerPixelError);
else if( imageType == GrayF32.class )
alg = (DenseOpticalFlowBlockPyramid)new DenseOpticalFlowBlockPyramid.F32(
config.searchRadius,config.regionRadius,config.maxPerPixelError);
else
throw new IllegalArgumentException("Unsupported image type "+imageType);
return new FlowBlock_to_DenseOpticalFlow<>(alg, config.pyramidScale, config.maxPyramidLayers, imageType);
} |
java | public static void main(String[] args) throws JMetalException {
DoubleProblem problem;
Algorithm<List<DoubleSolution>> algorithm;
MutationOperator<DoubleSolution> mutation;
String problemName;
if (args.length == 1) {
problemName = args[0];
} else {
problemName = "org.uma.jmetal.problem.multiobjective.zdt.ZDT1";
}
problem = (DoubleProblem) ProblemUtils.<DoubleSolution>loadProblem(problemName);
List<List<Double>> referencePoints;
referencePoints = new ArrayList<>();
referencePoints.add(Arrays.asList(0.2, 0.8));
double mutationProbability = 1.0 / problem.getNumberOfVariables();
double mutationDistributionIndex = 20.0;
mutation = new PolynomialMutation(mutationProbability, mutationDistributionIndex);
int maxIterations = 250;
int swarmSize = 100;
List<ArchiveWithReferencePoint<DoubleSolution>> archivesWithReferencePoints = new ArrayList<>();
for (int i = 0; i < referencePoints.size(); i++) {
archivesWithReferencePoints.add(
new CrowdingDistanceArchiveWithReferencePoint<DoubleSolution>(
swarmSize / referencePoints.size(), referencePoints.get(i)));
}
algorithm = new SMPSORP(problem,
swarmSize,
archivesWithReferencePoints,
referencePoints,
mutation,
maxIterations,
0.0, 1.0,
0.0, 1.0,
2.5, 1.5,
2.5, 1.5,
0.1, 0.1,
-1.0, -1.0,
new SequentialSolutionListEvaluator<>());
AlgorithmRunner algorithmRunner = new AlgorithmRunner.Executor(algorithm)
.execute();
List<DoubleSolution> population = algorithm.getResult();
long computingTime = algorithmRunner.getComputingTime();
JMetalLogger.logger.info("Total execution time: " + computingTime + "ms");
new SolutionListOutput(population)
.setSeparator("\t")
.setVarFileOutputContext(new DefaultFileOutputContext("VAR.tsv"))
.setFunFileOutputContext(new DefaultFileOutputContext("FUN.tsv"))
.print();
System.exit(0);
} |
python | def compound_crossspec(a_data, tbin, Df=None, pointProcess=False):
"""
Calculate cross spectra of compound signals.
a_data is a list of datasets (a_data = [data1,data2,...]).
For each dataset in a_data, the compound signal is calculated
and the crossspectra between these compound signals is computed.
If pointProcess=True, power spectra are normalized by the length T of the
time series.
Parameters
----------
a_data : list of numpy.ndarrays
Array: 1st axis unit, 2nd axis time.
tbin : float
Binsize in ms.
Df : float/None,
Window width of sliding rectangular filter (smoothing),
None -> no smoothing.
pointProcess : bool
If set to True, crossspectrum is normalized to signal length `T`
Returns
-------
freq : tuple
numpy.ndarray of frequencies.
CRO : tuple
3 dim numpy.ndarray; 1st axis first compound signal, 2nd axis second
compound signal, 3rd axis frequency.
Examples
--------
>>> compound_crossspec([np.array([analog_sig1, analog_sig2]),
np.array([analog_sig3,analog_sig4])], tbin, Df=Df)
Out[1]: (freq,CRO)
>>> CRO.shape
Out[2]: (2,2,len(analog_sig1))
"""
a_mdata = []
for data in a_data:
a_mdata.append(np.sum(data, axis=0)) # calculate compound signals
return crossspec(np.array(a_mdata), tbin, Df, units=False,
pointProcess=pointProcess) |
python | def apply_operation(op_stack, out_stack):
"""
Apply operation to the top two items of the output stack
op_stack Deque (reference)
out_stack Deque (reference)
"""
out_stack.append(calc(out_stack.pop(), out_stack.pop(), op_stack.pop())) |
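A self-contained sketch of the evaluation step above. Note the pop order: the first pop yields the right-hand operand, so the hypothetical `calc` used here takes `(rhs, lhs, op)` — an assumption about the real `calc`'s signature:

```python
from collections import deque

def calc(rhs, lhs, op):  # hypothetical: first pop is the right-hand operand
    return {"+": lhs + rhs, "-": lhs - rhs,
            "*": lhs * rhs, "/": lhs / rhs}[op]

out_stack = deque([7, 3])  # operands; 7 was pushed first
op_stack = deque(["-"])
out_stack.append(calc(out_stack.pop(), out_stack.pop(), op_stack.pop()))
print(out_stack.pop())  # 4, i.e. 7 - 3
```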
java | public long getPositiveMillisOrDefault(HazelcastProperty property, long defaultValue) {
long millis = getMillis(property);
return millis > 0 ? millis : defaultValue;
} |
python | def from_inline(cls: Type[MembershipType], version: int, currency: str, membership_type: str,
inline: str) -> MembershipType:
"""
Return Membership instance from inline format
:param version: Version of the document
:param currency: Name of the currency
:param membership_type: "IN" or "OUT" to enter or exit membership
:param inline: Inline string format
:return: Membership instance
"""
data = Membership.re_inline.match(inline)
if data is None:
raise MalformedDocumentError("Inline membership ({0})".format(inline))
issuer = data.group(1)
signature = data.group(2)
membership_ts = BlockUID.from_str(data.group(3))
identity_ts = BlockUID.from_str(data.group(4))
uid = data.group(5)
return cls(version, currency, issuer, membership_ts, membership_type, uid, identity_ts, signature) |
java | private void parseSubHierarchy(final Node parent, final AvroNode n) {
final Node parsed;
if (n.getPackageNode() != null) {
parsed = new PackageNodeImpl(parent, getString(n.getName()), getString(n.getFullName()));
} else if (n.getNamedParameterNode() != null) {
final AvroNamedParameterNode np = n.getNamedParameterNode();
parsed = new NamedParameterNodeImpl<>(parent, getString(n.getName()), getString(n.getFullName()),
getString(np.getFullArgClassName()), getString(np.getSimpleArgClassName()), np.getIsSet(), np.getIsList(),
getString(np.getDocumentation()), getString(np.getShortName()), getStringArray(np.getInstanceDefault()));
} else if (n.getClassNode() != null) {
final AvroClassNode cn = n.getClassNode();
final List<ConstructorDef<?>> injectableConstructors = new ArrayList<>();
final List<ConstructorDef<?>> allConstructors = new ArrayList<>();
for (final AvroConstructorDef injectable : cn.getInjectableConstructors()) {
final ConstructorDef<?> def = parseConstructorDef(injectable, true);
injectableConstructors.add(def);
allConstructors.add(def);
}
for (final AvroConstructorDef other : cn.getOtherConstructors()) {
final ConstructorDef<?> def = parseConstructorDef(other, false);
allConstructors.add(def);
}
@SuppressWarnings("unchecked")
final ConstructorDef<Object>[] dummy = new ConstructorDef[0];
parsed = new ClassNodeImpl<>(parent, getString(n.getName()), getString(n.getFullName()), cn.getIsUnit(),
cn.getIsInjectionCandidate(), cn.getIsExternalConstructor(), injectableConstructors.toArray(dummy),
allConstructors.toArray(dummy), getString(cn.getDefaultImplementation()));
} else {
throw new IllegalStateException("Bad avro node: got abstract node" + n);
}
for (final AvroNode child : n.getChildren()) {
parseSubHierarchy(parsed, child);
}
} |
python | def load_pdb(self, pdb_id, mapped_chains=None, pdb_file=None, file_type=None, is_experimental=True,
set_as_representative=False, representative_chain=None, force_rerun=False):
"""Load a structure ID and optional structure file into the structures attribute.
Args:
pdb_id (str): PDB ID
mapped_chains (str, list): Chain ID or list of IDs which you are interested in
pdb_file (str): Path to PDB file
file_type (str): Type of PDB file
is_experimental (bool): If this structure file is experimental
set_as_representative (bool): If this structure should be set as the representative structure
representative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID
force_rerun (bool): If the PDB should be reloaded if it is already in the list of structures
Returns:
PDBProp: The object that is now contained in the structures attribute
"""
if self.structures.has_id(pdb_id):
# Remove the structure if set to force rerun
if force_rerun:
existing = self.structures.get_by_id(pdb_id)
self.structures.remove(existing)
# Otherwise just retrieve it
else:
log.debug('{}: PDB ID already present in list of structures'.format(pdb_id))
pdb = self.structures.get_by_id(pdb_id)
if pdb_file:
pdb.load_structure_path(pdb_file, file_type)
if mapped_chains:
pdb.add_mapped_chain_ids(mapped_chains)
# Create a new StructProp entry
if not self.structures.has_id(pdb_id):
if is_experimental:
pdb = PDBProp(ident=pdb_id, mapped_chains=mapped_chains, structure_path=pdb_file, file_type=file_type)
else:
pdb = StructProp(ident=pdb_id, mapped_chains=mapped_chains, structure_path=pdb_file, file_type=file_type)
self.structures.append(pdb)
if set_as_representative:
# Parse structure so chains are stored before setting representative
pdb.parse_structure()
self._representative_structure_setter(structprop=pdb, keep_chain=representative_chain, force_rerun=force_rerun)
return self.structures.get_by_id(pdb_id) |
python | def __load_yml(self, stream):
"""Load yml stream into a dict object """
try:
return yaml.load(stream, Loader=yaml.SafeLoader)
except yaml.YAMLError as e:  # yaml.load raises YAMLError, not ValueError
cause = "invalid yml format. %s" % str(e)
raise InvalidFormatError(cause=cause) |
python | def create_action(self):
"""Create actions associated with this widget.
Notes
-----
I think that this should be a function or a property.
The good thing about the property is that it is updated every time you
run it (for example, if you change some parameters in the settings).
The main drawback is that you cannot reference back to the QAction, as
it creates new ones every time.
"""
output = {}
act = QAction(QIcon(ICON['open_rec']), 'Open Dataset...', self)
act.setShortcut(QKeySequence.Open)
act.triggered.connect(self.open_dataset)
output['open_dataset'] = act
max_dataset_history = self.parent.value('max_dataset_history')
recent_recs = keep_recent_datasets(max_dataset_history)
act = []
for one_recent_rec in recent_recs:
act_recent = QAction(one_recent_rec, self)
act_recent.triggered.connect(partial(self.open_dataset,
one_recent_rec))
act.append(act_recent)
output['open_recent'] = act
act = QAction('Export dataset...', self)
act.triggered.connect(self.parent.show_export_dataset_dialog)
act.setEnabled(False)
output['export'] = act
self.action = output |
java | public Object readObject(String correlationId, ConfigParams parameters) throws ApplicationException {
if (_path == null)
throw new ConfigException(correlationId, "NO_PATH", "Missing config file path");
try {
Path path = Paths.get(_path);
String json = new String(Files.readAllBytes(path));
json = parameterize(json, parameters);
return jsonMapper.readValue(json, typeRef);
} catch (Exception ex) {
throw new FileException(correlationId, "READ_FAILED", "Failed reading configuration " + _path + ": " + ex)
.withDetails("path", _path).withCause(ex);
}
} |
java | private boolean checkSiVatId(final String pvatId) {
boolean checkSumOk;
final int checkSum = pvatId.charAt(9) - '0';
final int sum = (pvatId.charAt(2) - '0') * 8 + (pvatId.charAt(3) - '0') * 7
+ (pvatId.charAt(4) - '0') * 6 + (pvatId.charAt(5) - '0') * 5 + (pvatId.charAt(6) - '0') * 4
+ (pvatId.charAt(7) - '0') * 3 + (pvatId.charAt(8) - '0') * 2;
int calculatedCheckSum = MODULO_11 - sum % MODULO_11;
if (calculatedCheckSum == 11) {
checkSumOk = false;
} else {
if (calculatedCheckSum == 10) {
calculatedCheckSum = 0;
}
checkSumOk = checkSum == calculatedCheckSum;
}
return checkSumOk;
} |
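The same mod-11 check expressed in Python, with a made-up ID constructed to satisfy the checksum (a sketch mirroring the Java, not an official validator):

```python
def check_si_vat_id(vat_id):
    # Weighted sum of digits 3-9 (indices 2..8); check digit at index 9.
    weights = (8, 7, 6, 5, 4, 3, 2)
    total = sum(int(d) * w for d, w in zip(vat_id[2:9], weights))
    calculated = 11 - total % 11
    if calculated == 11:   # remainder 0: no valid check digit exists
        return False
    if calculated == 10:
        calculated = 0
    return int(vat_id[9]) == calculated

print(check_si_vat_id("SI50223054"))  # True (ID made up to satisfy the check)
```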
python | def retinotopy_anchors(mesh, mdl,
polar_angle=None, eccentricity=None,
weight=None, weight_min=0.1,
field_sign_weight=0, field_sign=None, invert_field_sign=False,
radius_weight=0, radius_weight_source='Wandell2015', radius=None,
model_field_sign=None,
model_hemi=Ellipsis,
scale=1,
shape='Gaussian', suffix=None,
sigma=[0.1, 2.0, 8.0],
select='close'):
'''
retinotopy_anchors(mesh, model) is intended for use with the mesh_register function and the
retinotopy_model() function and/or the RetinotopyModel class; it yields a description of the
anchor points that tie relevant vertices of the given mesh to points predicted by the given model
object. Any instance of the RetinotopyModel class should work as a model argument; this includes
SchiraModel objects as well as RetinotopyMeshModel objects such as those returned by the
retinotopy_model() function. If the model given is a string, then it is passed to the
retinotopy_model() function first.
Options:
* polar_angle (default None) specifies that the given data should be used in place of the
'polar_angle' or 'PRF_polar_angle' property values. The given argument must be numeric and
the same length as the number of vertices in the mesh. If None is given, then the
property value of the mesh is used; if a list is given and any element is None, then the
weight for that vertex is treated as a zero. If the option is a string, then the property
value with the same name is used as the polar_angle data.
* eccentricity (default None) specifies that the given data should be used in place of the
'eccentricity' or 'PRF_eccentricity' property values. The eccentricity option is handled
virtually identically to the polar_angle option.
* weight (default None) specifies that the weight or scale of the data; this is handled
generally like the polar_angle and eccentricity options, but may also be 1, indicating that
all vertices with polar_angle and eccentricity values defined will be given a weight of 1.
If weight is left as None, then the function will check for 'weight',
'variance_explained', 'PRF_variance_explained', and 'retinotopy_weight' values and will use
the first found (in that order). If none of these is found, then a value of 1 is assumed.
* weight_min (default 0) specifies that the weight must be higher than the given value in
order to be included in the fit; vertices with weights below this value have their weights
truncated to 0.
* scale (default 1) specifies a constant by which to multiply all weights for all anchors; the
value None is interpreted as 1.
* shape (default 'Gaussian') specifies the shape of the potential function (see mesh_register)
* model_hemi (default: None) specifies the hemisphere of the model to load; if None, then
looks for a non-specific model.
* suffix (default None) specifies any additional arguments that should be appended to the
potential function description list that is produced by this function; i.e., the
retinotopy_anchors function produces a list, and the contents of suffix, if given and not
None, are appended to that list (see mesh_register).
* select (default 'close') specifies a function that will be called with two arguments for
every vertex given an anchor; the arguments are the vertex label and the matrix of anchors.
The function should return a list of anchors to use for the label (None is equivalent to
lambda id,anc: anc). The parameter may alternately be specified using the string 'close':
select=['close', [k]] indicates that any anchor more than k times the average edge-length in
the mesh should be excluded; a value of just ['close', k] on the other hand indicates that
any anchor more than k distance from the vertex should be excluded. The default value,
'close', is equivalent to ['close', [40]].
* sigma (default [0.1, 2.0, 8.0]) specifies how the sigma parameter should be handled; if
None, then no sigma value is specified; if a single number, then all sigma values are
assigned that value; if a list of three numbers, then the first is the minimum sigma value,
the second is the fraction of the minimum distance between paired anchor points, and the
last is the maximum sigma --- the idea with this form of the argument is that the ideal
sigma value in many cases is approximately 0.25 to 0.5 times the distance between anchors
to which a single vertex is attracted; for any anchor a to which a vertex u is attracted,
the sigma of a is the middle sigma-argument value times the minimum distance from a to all
other anchors to which u is attracted (clipped by the min and max sigma).
* field_sign_weight (default: 0) specifies the amount of weight that should be put on the
retinotopic field of the model as a method of attenuating the weights on those anchors whose
empirical retinotopic values and predicted model locations do not match. The weight that
results is calculated from the difference in empirical field-sign for each vertex and the
visual area field sign based on the labels in the model. The higher the field-sign weight,
(approaching 1) the more the resulting value is a geometric mean of the field-sign-based
weight and the original weights. As this value approaches 0, the resulting weights are more
like the original weights.
* radius_weight (default: 0) specifies the amount of weight that should be put on the
receptive field radius of the model as a method of attenuating the weights on those anchors
whose empirical retinotopic values and predicted model locations do not match. The weight
that results is calculated from the difference in empirical RF radius for each vertex and
the predicted RF radius based on the labels in the model. The higher the radius weight,
(approaching 1) the more the resulting value is a geometric mean of the field-sign-based
weight and the original weights. As this value approaches 0, the resulting weights are more
like the original weights.
* radius_weight_source (default: 'Wandell2015') specifies the source for predicting RF radius;
based on eccentricity and visual area label.
Example:
# The retinotopy_anchors function is intended for use with mesh_register, as follows:
# Define our Schira Model:
model = neuropythy.registration.SchiraModel()
# Make sure our mesh has polar angle, eccentricity, and weight data:
mesh.prop('polar_angle', polar_angle_vertex_data);
mesh.prop('eccentricity', eccentricity_vertex_data);
mesh.prop('weight', variance_explained_vertex_data);
# register the mesh using the retinotopy and model:
registered_mesh = neuropythy.registration.mesh_register(
mesh,
['mesh', retinotopy_anchors(mesh, model)],
max_step_size=0.05,
max_steps=2000)
'''
if pimms.is_str(mdl):
hemi = None
if pimms.is_str(model_hemi):
model_hemi = model_hemi.upper()
hemnames = {k:h
for (h,als) in [('LH', ['LH','L','LEFT','RHX','RX']),
('RH', ['RH','R','RIGHT','LHX','LX'])]
for k in als}
if model_hemi in hemnames: hemi = hemnames[model_hemi]
else: raise ValueError('Unrecognized hemisphere name: %s' % model_hemi)
elif model_hemi is not None:
raise ValueError('model_hemi must be a string, Ellipsis, or None')
mdl = retinotopy_model(mdl, hemi=hemi)
if not isinstance(mdl, RetinotopyModel):
raise RuntimeError('given model is not a RetinotopyModel instance!')
if not isinstance(mesh, geo.Mesh):
raise RuntimeError('given mesh is not a Mesh object!')
n = mesh.vertex_count
X = mesh.coordinates.T
if weight_min is None: weight_min = 0
# make sure we have our polar angle/eccen/weight values:
# (weight is odd because it might be a single number, so handle that first)
(polar_angle, eccentricity, weight) = [
extract_retinotopy_argument(mesh, name, arg, default='empirical')
for (name, arg) in [
('polar_angle', polar_angle),
('eccentricity', eccentricity),
('weight', np.full(n, weight) if pimms.is_number(weight) else weight)]]
# Make sure they contain no None/invalid values
(polar_angle, eccentricity, weight) = _retinotopy_vectors_to_float(
polar_angle, eccentricity, weight,
weight_min=weight_min)
if np.sum(weight > 0) == 0:
raise ValueError('No positive weights found')
idcs = np.where(weight > 0)[0]
# Interpret the select arg if necessary (but don't apply it yet)
select = ['close', [40]] if select == 'close' else \
['close', [40]] if select == ['close'] else \
select
if select is None:
select = lambda a,b: b
elif ((pimms.is_vector(select) or is_list(select) or is_tuple(select))
and len(select) == 2 and select[0] == 'close'):
if pimms.is_vector(select[1]): d = np.mean(mesh.edge_lengths) * select[1][0]
else: d = select[1]
select = lambda idx,ancs: [a for a in ancs if a[0] is not None if npla.norm(X[idx] - a) < d]
# Okay, apply the model:
res = mdl.angle_to_cortex(polar_angle[idcs], eccentricity[idcs])
oks = np.isfinite(np.sum(np.reshape(res, (res.shape[0], -1)), axis=1))
# Organize the data; trim out those not selected
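    # each entry of data is [vertex ids (one per anchor), selected anchor points, anchor
    # labels]; ksidx maps each anchor point produced by the model back to its 1-based index
    # in the model's anchor list, which serves as the visual-area label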
data = [[[i for _ in r], r, [ksidx[tuple(a)] for a in r]]
for (i,r0,ok) in zip(idcs, res, oks) if ok
for ksidx in [{tuple(a):(k+1) for (k,a) in enumerate(r0)}]
for r in [select(i, r0)]
if len(r) > 0]
# Flatten out the data into arguments for Java
idcs = [int(i) for d in data for i in d[0]]
ancs = np.asarray([pt for d in data for pt in d[1]]).T
labs = np.asarray([ii for d in data for ii in d[2]]).T
# Get just the relevant weights and the scale
wgts = np.asarray(weight[idcs] * (1 if scale is None else scale))
# add in the field-sign weights and radius weights if requested here;
if not np.isclose(field_sign_weight, 0) and mdl.area_name_to_id is not None:
id2n = mdl.area_id_to_name
if field_sign is True or field_sign is Ellipsis or field_sign is None:
from .cmag import cmag
r = {'polar_angle': polar_angle, 'eccentricity': eccentricity}
#field_sign = retinotopic_field_sign(mesh, retinotopy=r)
field_sign = cmag(mesh, r)['field_sign']
elif pimms.is_str(field_sign): field_sign = mesh.prop(field_sign)
field_sign = np.asarray(field_sign)
if invert_field_sign: field_sign = -field_sign
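        # fswgts is 1 where the empirical and model field signs agree and falls to 0 where
        # they fully disagree (|difference| == 2, so 1 - 0.25*4 == 0)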
fswgts = 1.0 - 0.25 * np.asarray(
[(fs - visual_area_field_signs[id2n[l]]) if l in id2n else 0
for (l,fs) in zip(labs,field_sign[idcs])])**2
        # blend the field-sign-based weights with the original weights per field_sign_weight
fswgts = field_sign_weight*fswgts + (1 - field_sign_weight)*wgts
else: fswgts = None
# add in radius weights if requested as well
if not np.isclose(radius_weight, 0) and mdl.area_name_to_id is not None:
id2n = mdl.area_id_to_name
emprad = extract_retinotopy_argument(mesh, 'radius', radius, default='empirical')
emprad = emprad[idcs]
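        # rank-normalize the radii into [-0.5, 0.5) so that empirical and predicted values
        # (prerad, below) are compared on a common scale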
emprad = np.argsort(np.argsort(emprad)) * (1.0 / len(emprad)) - 0.5
eccs = eccentricity[idcs]
prerad = np.asarray([predict_pRF_radius(ecc, id2n[lbl], source=radius_weight_source)
for (ecc,lbl) in zip(eccs,labs)])
prerad = np.argsort(np.argsort(prerad)) * (1.0 / len(prerad)) - 0.5
rdwgts = 1.0 - (emprad - prerad)**2
        # blend the radius-based weights with the original weights per radius_weight
rdwgts = radius_weight*rdwgts + (1-radius_weight)*wgts
else: rdwgts = None
# apply the weights
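    # (geometric means, so a near-zero component suppresses the combined weight)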
if fswgts is not None:
if rdwgts is not None: wgts = np.power(fswgts*rdwgts*wgts, 1.0/3.0)
else: wgts = np.sqrt(fswgts*wgts)
elif rdwgts is not None: wgts = np.sqrt(rdwgts*wgts)
# Figure out the sigma parameter:
if sigma is None: sigs = None
elif pimms.is_number(sigma): sigs = sigma
elif pimms.is_vector(sigma) and len(sigma) == 3:
[minsig, mult, maxsig] = sigma
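        # each anchor's sigma is mult times the distance to the nearest other anchor
        # attracting the same vertex (a vertex with only one anchor gets maxsig), clipped
        # to [minsig, maxsig]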
sigs = np.clip(
[mult*min([npla.norm(a0 - a) for a in anchs if a is not a0]) if len(iii) > 1 else maxsig
for (iii,anchs,_) in data
for a0 in anchs],
minsig, maxsig)
else:
raise ValueError('sigma must be a number or a list of 3 numbers')
# okay, we've partially parsed the data that was given; now we can construct the final list of
# instructions:
tmp = (['anchor', shape,
            np.asarray(idcs, dtype=int),
np.asarray(ancs, dtype=np.float64),
'scale', np.asarray(wgts, dtype=np.float64)]
+ ([] if sigs is None else ['sigma', sigs])
+ ([] if suffix is None else suffix))
return tmp |
java | public static long[] concatArrays(List<long[]> arrays, int start, int size) {
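        // Concatenate up to `size` values drawn from the given arrays, reading the first
        // array starting at offset `start`; e.g. with arrays = [{1, 2}, {3, 4}], start = 1,
        // size = 2, the result is {2, 3}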
long[] result = new long[size];
// How many values we still need to move over
int howManyLeft = size;
// Where in the resulting array we're currently bulk-writing
int targetPosition = 0;
// Where we're copying *from*, in (one of) the source array.
// Typically 0, except maybe for the first array in the list
int from = start;
for (int i = 0; i < arrays.size() && howManyLeft > 0; i++) {
long[] current = arrays.get(i);
            // Can't copy more than what remains in the current source array, nor more
            // than the total still needed
int howManyThisRound = Math.min(current.length - from, howManyLeft);
System.arraycopy(current, from, result, targetPosition, howManyThisRound);
from = 0;
howManyLeft -= howManyThisRound;
targetPosition += howManyThisRound;
}
        // If this is non-zero here, we were asked to copy more than the sources provided
if (howManyLeft > 0) {
throw new ArrayIndexOutOfBoundsException(
String.format("Not enough data, short of %d elements", howManyLeft));
}
return result;
} |
java | public static Object[] boxAll(Object src, int srcPos, int len) {
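        // dispatch on the primitive component type of src and box the requested slice
        // into an array of wrapper objects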
switch (tId(src.getClass())) {
case I_BOOLEAN: return box((boolean[]) src, srcPos, len);
case I_BYTE: return box((byte[]) src, srcPos, len);
case I_CHARACTER: return box((char[]) src, srcPos, len);
case I_DOUBLE: return box((double[]) src, srcPos, len);
case I_FLOAT: return box((float[]) src, srcPos, len);
case I_INTEGER: return box((int[]) src, srcPos, len);
case I_LONG: return box((long[]) src, srcPos, len);
case I_SHORT: return box((short[]) src, srcPos, len);
}
        throw new IllegalArgumentException("Not a primitive array: " + src);
} |
python | def connect_patch_namespaced_pod_proxy(self, name, namespace, **kwargs):
"""
connect PATCH requests to proxy of Pod
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_patch_namespaced_pod_proxy(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodProxyOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str path: Path is the URL path to use for the current proxy request to pod.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_patch_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs)
else:
(data) = self.connect_patch_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs)
return data |
python | def standard_sc_expr_str(sc):
"""
Standard symbol/choice printing function. Uses plain Kconfig syntax, and
displays choices as <choice> (or <choice NAME>, for named choices).
See expr_str().
"""
if sc.__class__ is Symbol:
return '"{}"'.format(escape(sc.name)) if sc.is_constant else sc.name
# Choice
return "<choice {}>".format(sc.name) if sc.name else "<choice>" |
java | @SuppressWarnings("unchecked")
public T add(Iterable<Link> links) {
Assert.notNull(links, "Given links must not be null!");
links.forEach(this::add);
return (T) this;
} |
java | public static spilloveraction[] get(nitro_service service, options option) throws Exception{
spilloveraction obj = new spilloveraction();
spilloveraction[] response = (spilloveraction[])obj.get_resources(service,option);
return response;
} |
java | private void verifyReplacementRange(SegmentWithRange replacedSegment, StreamSegmentsWithPredecessors replacementSegments) {
log.debug("Verification of replacement segments {} with the current segments {}", replacementSegments, segments);
Map<Long, List<SegmentWithRange>> replacementRanges = replacementSegments.getReplacementRanges();
List<SegmentWithRange> replacements = replacementRanges.get(replacedSegment.getSegment().getSegmentId());
Preconditions.checkArgument(replacements != null, "Replacement segments did not contain replacements for segment being replaced");
if (replacementRanges.size() == 1) {
//Simple split
Preconditions.checkArgument(replacedSegment.getHigh() == getUpperBound(replacements));
Preconditions.checkArgument(replacedSegment.getLow() == getLowerBound(replacements));
} else {
Preconditions.checkArgument(replacedSegment.getHigh() <= getUpperBound(replacements));
Preconditions.checkArgument(replacedSegment.getLow() >= getLowerBound(replacements));
}
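        // each replacement range must line up with segments currently being tracked;
        // floorEntry/higherEntry locate the tracked segments at the range's bounds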
for (Entry<Long, List<SegmentWithRange>> ranges : replacementRanges.entrySet()) {
Entry<Double, SegmentWithRange> upperReplacedSegment = segments.floorEntry(getUpperBound(ranges.getValue()));
Entry<Double, SegmentWithRange> lowerReplacedSegment = segments.higherEntry(getLowerBound(ranges.getValue()));
Preconditions.checkArgument(upperReplacedSegment != null, "Missing replaced replacement segments %s",
replacementSegments);
Preconditions.checkArgument(lowerReplacedSegment != null, "Missing replaced replacement segments %s",
replacementSegments);
}
} |
python | def login(self, username, password):
"""Log in to Betfair. Sets `session_token` if successful.
:param str username: Username
:param str password: Password
:raises: BetfairLoginError
"""
response = self.session.post(
os.path.join(self.identity_url, 'certlogin'),
cert=self.cert_file,
data=urllib.urlencode({
'username': username,
'password': password,
}),
headers={
'X-Application': self.app_key,
'Content-Type': 'application/x-www-form-urlencoded',
},
timeout=self.timeout,
)
utils.check_status_code(response, [httplib.OK])
data = response.json()
if data.get('loginStatus') != 'SUCCESS':
raise exceptions.LoginError(response, data)
self.session_token = data['sessionToken'] |
python | def delete_cluster_role(self, name, **kwargs):
"""
delete a ClusterRole
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_cluster_role(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ClusterRole (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_cluster_role_with_http_info(name, **kwargs)
else:
(data) = self.delete_cluster_role_with_http_info(name, **kwargs)
return data |
java | public void selectValues(String[] expectedValues, double seconds) {
double end = System.currentTimeMillis() + (seconds * 1000);
try {
elementPresent(seconds);
if (!element.is().select()) {
throw new TimeoutException(ELEMENT_NOT_SELECT);
}
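            // busy-wait until the selected values match the expected ones or the timeout elapses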
while (!Arrays.toString(element.get().selectValues()).equals(Arrays.toString(expectedValues)) && System.currentTimeMillis() < end) ;
double timeTook = Math.min((seconds * 1000) - (end - System.currentTimeMillis()), seconds * 1000) / 1000;
checkSelectValues(expectedValues, seconds, timeTook);
} catch (TimeoutException e) {
checkSelectValues(expectedValues, seconds, seconds);
}
} |
java | @XmlElementDecl(namespace = "http://www.drugbank.ca", name = "defining-change", scope = SnpEffectType.class)
public JAXBElement<String> createSnpEffectTypeDefiningChange(String value) {
return new JAXBElement<String>(_SnpEffectTypeDefiningChange_QNAME, String.class, SnpEffectType.class, value);
} |
java | public IpcLogEntry addTag(String k, String v) {
this.additionalTags.put(k, v);
return this;
} |
python | def collect_yarn_application_diagnostics(self, *application_ids):
"""
DEPRECATED: use create_yarn_application_diagnostics_bundle on the Yarn service. Deprecated since v10.
Collects the Diagnostics data for Yarn applications.
@param application_ids: An array of strings containing the ids of the
yarn applications.
@return: Reference to the submitted command.
@since: API v8
"""
args = dict(applicationIds = application_ids)
return self._cmd('yarnApplicationDiagnosticsCollection', api_version=8, data=args) |
java | @Override
public Name addNameToPerson(final Person person, final String string) {
if (person == null || string == null) {
return new Name();
}
final Name name = new Name(person, string);
person.insert(name);
return name;
} |
java | public static JavaPairRDD<RowColumn, Bytes> toPairRDD(JavaRDD<RowColumnValue> rcvRDD) {
return rcvRDD.mapToPair(rcv -> new Tuple2<>(rcv.getRowColumn(), rcv.getValue()));
} |
java | public static <D> Predicate notIn(Expression<D> left, SubQueryExpression<? extends D> right) {
return predicate(Ops.NOT_IN, left, right);
} |
java | @Override
public <T> Flowable<T> get(@Nonnull ResultSetMapper<? extends T> mapper) {
Preconditions.checkNotNull(mapper, "mapper cannot be null");
return update.startWithDependency(Update.<T>createReturnGeneratedKeys(update.connections,
update.parameterGroupsToFlowable(), update.sql, mapper, true));
} |
java | static String maybeEscapeElementValue(final String pValue) {
int startEscape = needsEscapeElement(pValue);
if (startEscape < 0) {
// If no escaping is needed, simply return original
return pValue;
}
else {
// Otherwise, start replacing
StringBuilder builder = new StringBuilder(pValue.substring(0, startEscape));
builder.ensureCapacity(pValue.length() + 30);
int pos = startEscape;
for (int i = pos; i < pValue.length(); i++) {
switch (pValue.charAt(i)) {
case '&':
pos = appendAndEscape(pValue, pos, i, builder, "&");
break;
case '<':
pos = appendAndEscape(pValue, pos, i, builder, "<");
break;
case '>':
pos = appendAndEscape(pValue, pos, i, builder, ">");
break;
//case '\'':
//case '"':
default:
break;
}
}
builder.append(pValue.substring(pos));
return builder.toString();
}
} |
java | private static ResourceCache loadResourceCache() {
try {
return Optional.ofNullable(Bootstrap.getService(ResourceCache.class)).orElseGet(
DefaultResourceCache::new);
} catch (Exception e) {
LOG.log(Level.SEVERE, "Error loading ResourceCache instance.", e);
return new DefaultResourceCache();
}
} |
java | public Effects animate(Object stringOrProperties, int duration, Function... funcs) {
return animate(stringOrProperties, duration, EasingCurve.linear, funcs);
} |
python | def retrieve_old_notifications(self):
"""
Retrieve notifications older than X days, where X is specified in settings
"""
date = ago(days=DELETE_OLD)
return Notification.objects.filter(added__lte=date) |